This file is indexed.

/usr/share/arc/LRMSInfo.pm is in nordugrid-arc-arex 5.4.2-1build1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

package LRMSInfo;

use Storable;

use strict;

use LogUtils;
use InfoChecker;

##############################################################################
# To include a new LRMS:
##############################################################################
#
# Each LRMS-specific module needs to:
#
# 1. Provide subroutine get_lrms_info. The interfaces are documented in this
#    file. All variables required in lrms_info_schema should be defined in the
#    LRMS module. Returning an empty string "" is perfectly OK if a variable
#    does not apply to the LRMS.
#
# 2. Provide subroutine get_lrms_options_schema. The return value must be a
#    schema describing the options that are recognized by the plugin.
#
# A minimal plugin skeleton is sketched below.
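#
# For illustration only: the module name MYLRMSmod, the option key
# mylrms_bin_path and the empty return values here are hypothetical, not
# taken from ARC.
#
#   package MYLRMSmod;
#   use Exporter; our @ISA = ('Exporter');
#   our @EXPORT_OK = qw(get_lrms_info get_lrms_options_schema);
#
#   sub get_lrms_options_schema {
#       # options understood by this plugin, in InfoChecker notation
#       return { 'mylrms_bin_path' => '*' };
#   }
#
#   sub get_lrms_info {
#       my ($options) = @_;
#       # query the batch system here and fill in the lrms_info_schema fields
#       return { cluster => {}, queues => {}, jobs => {}, nodes => {} };
#   }
#
#   1;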

##############################################################################
# Public interface to LRMS modules
##############################################################################
#
# use LRMSInfo;
#
# my $lrms_info = LRMSInfo::collect($options);
#
# Arguments:
#    $options - a hash reference containing options. This module will check it
#               against $lrms_options_schema and the LRMS plugin's own schema
#               and then pass it on to the LRMS plugin.
#
# Returns:
#    $lrms_info - a hash reference containing all information collected from
#                 the LRMS. This module will check it against
#                 $lrms_info_schema (see below)
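#
# For example, using one of the test option hashes defined near the end of
# this file:
#
#   my $lrms_info = LRMSInfo::collect($opt3);
#   print "total cpus: $lrms_info->{cluster}{totalcpus}\n";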


##############################################################################
# Schemas
##############################################################################
#
# The usage of these schemas is described in InfoChecker.pm
#
#    $lrms_options_schema - for checking the $options hash. This is just a
#                           minimal schema; LRMS plugins may use an extended
#                           version (an illustrative example follows each
#                           schema below)
#    $lrms_info_schema - for checking data returned by the LRMS modules

my $lrms_options_schema = {
    'lrms' => '',              # name of the LRMS module
    'queues' => {              # queue names are keys in this hash
        '*' => {
            'users' => [ '' ]  # list of user IDs to query in the LRMS
        }
    },
    'jobs' => [ '' ]           # list of jobs IDs to query in the LRMS
};
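
# An illustrative $options hash matching this minimal schema, modelled on the
# $opt3 test hash near the end of this file (plugin-specific keys such as
# pbs_bin_path come on top and are checked against the plugin's own schema):
#
#   {
#       lrms   => 'pbs',
#       queues => { 'batch' => { users => ['joe', 'pete'] } },
#       jobs   => [ 63, 453 ]
#   }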

my $lrms_info_schema = {
    'cluster' => {
        'lrms_type'       => '',
        'lrms_glue_type'  => '*',  # one of: bqs condor fork loadleveler lsf openpbs sungridengine torque torquemaui ...
        'lrms_version'    => '',
        'schedpolicy'     => '*',
        'totalcpus'       => '',
        'queuedcpus'      => '',
        'usedcpus'        => '',
        'queuedjobs'      => '',
        'runningjobs'     => '',
        'cpudistribution' => ''
    },
    'queues' => {
        '*' => {
            'status'       => '',
            'maxrunning'   => '',  # the max number of jobs allowed to run in this queue
            'maxqueuable'  => '*', # the max number of jobs allowed to be queued
            'maxuserrun'   => '*', # the max number of jobs that a single user can run
            'maxcputime'   => '*', # units: seconds (per-slot)
            'maxtotalcputime' => '*', # units: seconds
            'mincputime'   => '*', # units: seconds
            'defaultcput'  => '*', # units: seconds
            'maxwalltime'  => '*', # units: seconds
            'minwalltime'  => '*', # units: seconds
            'defaultwallt' => '*', # units: seconds
            'running'      => '',  # the number of cpus being occupied by running jobs
            'queued'       => '',  # the number of queued jobs
            'suspended'    => '*', # the number of suspended jobs
            'total'        => '*', # the total number of jobs in this queue
            'totalcpus'    => '',  # the number of cpus dedicated to this queue
            'preemption' => '*',
            'acl_users'  => [ '*' ],
            'users' => {
                '*' => {
                    'freecpus'    => {
                        '*' => ''  # key: # of cpus, value: time limit in minutes (0 for unlimited)
                     },
                    'queuelength' => ''
                }
            }
        }
    },
    'jobs' => {
        '*' => {
            'status'      => '',
            'cpus'        => '*',
            'rank'        => '*',
            'mem'         => '*', # units: kB
            'walltime'    => '*', # units: seconds
            'cputime'     => '*', # units: seconds
            'reqwalltime' => '*', # units: seconds
            'reqcputime'  => '*', # units: seconds
            'nodes'     => [ '*' ], # names of nodes where the job runs
            'comment'   => [ '*' ]
        }
    },
    'nodes' => {
        '*' => {                 # key: hostname of the node (as known to the LRMS)
            'isavailable' => '',      # is available for running jobs
            'isfree'      => '',      # is available and not yet fully used, can accept more jobs
            'tags'        => [ '*' ], # tags associated to nodes, i.e. node properties in PBS
            'vmem'        =>   '*',   # virtual memory, units: kB
            'pmem'        =>   '*',   # physical memory, units: kB
            'slots'       =>   '*',   # job slots or virtual processors
            'lcpus'       =>   '*',   # cpus visible to the os
            'pcpus'       =>   '*',   # number of sockets
            'sysname'     =>   '*',   # what would uname -s print on the node
            'release'     =>   '*',   # what would uname -r print on the node
            'machine'     =>   '*',   # what would uname -m print (if the node ran Linux)
        }
    }
};
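
# A sketch of a conforming return value with purely illustrative values, for a
# single queue, job and node (assuming, per the InfoChecker.pm conventions used
# above, that '' marks a required entry and '*' an optional one):
#
#   {
#       cluster => { lrms_type => 'torque', lrms_version => '4.2.10',
#                    totalcpus => 8, usedcpus => 2, queuedcpus => 0,
#                    runningjobs => 1, queuedjobs => 0,
#                    cpudistribution => '4cpu:2' },
#       queues  => { batch => { status => 1, running => 2, queued => 0,
#                               maxrunning => 100, totalcpus => 8,
#                               users => {} } },
#       jobs    => { 453   => { status => 'R', nodes => ['node1'] } },
#       nodes   => { node1 => { isavailable => 1, isfree => 1 } }
#   }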

our $log = LogUtils->getLogger("LRMSInfo");


sub collect($) {
    my ($options) = @_;
    my ($checker, @messages);

    my ($lrms_name, $share) = split / /, $options->{lrms};
    $options->{scheduling_policy} = $options->{SchedulingPolicy} if $options->{SchedulingPolicy};
    $log->error('lrms option is missing') unless $lrms_name;
    load_lrms($lrms_name);

    # merge schema exported by the LRMS plugin
    my $schema = { %$lrms_options_schema, %{get_lrms_options_schema()} };

    $checker = InfoChecker->new($schema);
    @messages = $checker->verify($options);
    $log->warning("config key options->$_") foreach @messages;
    $log->fatal("Some required options are missing") if @messages;

    my $result = get_lrms_info($options);

    use Data::Dumper('Dumper');
    my $custom_lrms_schema = customize_info_schema($lrms_info_schema, $options);
    $checker = InfoChecker->new($custom_lrms_schema);
    @messages = $checker->verify($result);
    $log->warning("return value lrmsinfo->$_") foreach @messages;

    # some backends leave extra spaces -- trim them
    $result->{cluster}{cpudistribution} =~ s/^\s+//;
    $result->{cluster}{cpudistribution} =~ s/\s+$//;

    # make sure nodes are unique
    for my $job (values %{$result->{jobs}}) {
        next unless $job->{nodes};
        my %nodes;
        $nodes{$_} = 1 for @{$job->{nodes}};
        $job->{nodes} = [ sort keys %nodes ];
    }

    return $result;
}

# Loads the needed LRMS plugin at runtime.
# It first tries to load XYZmod.pm (implementing the native ARC1 interface);
# otherwise it falls back to XYZ.pm (an ARC0.6 plugin) loaded through ARC0mod.
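#
# For example, lrms => 'pbs' first attempts require "PBSmod.pm"; if that
# fails, ARC0mod.pm is loaded and ARC0mod::load_lrms('PBS') pulls in the
# older PBS.pm plugin.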

sub load_lrms($) {
    my $lrms_name = uc(shift);

    my $module = $lrms_name."mod";
    eval { require "$module.pm" };
    
    if ($@) {
        $log->debug("require for $module returned: $@");
        $log->debug("Using ARC0.6 compatible $lrms_name module");

        require ARC0mod;
        ARC0mod::load_lrms($lrms_name);
        $module = "ARC0mod";
    }
    import $module qw(get_lrms_info get_lrms_options_schema);
}

# Prepares a custom schema that has an individual key for each queue and each
# job named in $options.
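#
# For example, with $options->{jobs} = [63, 453] the generic entry
#   jobs => { '*' => { ... } }
# is expanded into
#   jobs => { 63 => { ... }, 453 => { ... } }
# so that InfoChecker can report missing data per job. Queue names from
# $options->{queues} are expanded the same way.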

sub customize_info_schema($$) {
    my ($info_schema,$options) = @_;

    my $new_schema;

    # make a deep copy
    $new_schema = Storable::dclone($info_schema);

    # adjust schema for each job: Replace "*" with actual job id's
    for my $job (@{$options->{jobs}}) {
        $new_schema->{jobs}{$job} = $new_schema->{jobs}{"*"};
    }
    delete $new_schema->{jobs}{"*"};

    # adjust schema for each queue: Replace "*" with actual queue names
    for my $queue (keys %{$options->{queues}}) {
        $new_schema->{queues}{$queue} = $new_schema->{queues}{"*"};
    }
    delete $new_schema->{queues}{"*"};

    return $new_schema;
}

#### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ##### TEST ####

my $opt1 = {lrms => 'fork',
            sge_root => '/opt/n1ge6',
            sge_cell => 'cello',
            sge_bin_path => '/opt/n1ge6/bin/lx24-x86',
            queues => {'shar' => {users => []}, 'loca' => {users => ['joe','pete'], maxjobs => '4 2'}},
            jobs => [qw(7 101 5865)]
           };

my $opt2 = {lrms => 'sge',
            sge_root => '/opt/n1ge6',
            sge_cell => 'cello',
            sge_bin_path => '/opt/n1ge6/bin/lx24-amd64',
            queues => {'shar' => {users => []}, 'all.q' => {users => ['joe','pete']}},
            jobs => [63, 36006]
           };

my $opt3 = {lrms => 'pbs',
            pbs_bin_path => '/opt/torque/bin',
            pbs_log_path => '/var/spool/torque/server_logs',
            queues => {'batch' => {users => ['joe','pete']}},
            jobs => [63, 453]
           };

sub test {
    my $options = shift;
    LogUtils::level('VERBOSE');
    require Data::Dumper; import Data::Dumper qw(Dumper);
    $log->debug("Options: " . Dumper($options));
    my $results = LRMSInfo::collect($options);
    $log->debug("Results: " . Dumper($results));
}

#test($opt3);

1;