This file is indexed.

/usr/share/openmpi/mca-coll-ml.config is in openmpi-common 1.10.2-8ubuntu1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.
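
As a quick orientation (not part of the file itself): on an installed system the package ownership and permissions can be confirmed, and Open MPI's coll/ml component can be pointed at a locally edited copy of this configuration. The MCA parameter name in the last command is an assumption taken from the coll/ml component sources; verify it with ompi_info on your installation.

  # confirm which package owns the file and check its ownership/mode
  dpkg -S /usr/share/openmpi/mca-coll-ml.config
  stat -c '%U:%G %a' /usr/share/openmpi/mca-coll-ml.config

  # list the coll/ml MCA parameters, including the config-file path
  ompi_info --param coll ml --level 9

  # run with a locally edited copy (parameter name assumed from the coll/ml sources)
  mpirun --mca coll_ml_config_file /path/to/mca-coll-ml.config ./my_mpi_app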

##################################
# ML collective configuration file
##################################
# NOTE (by Pasha):
# Since the ML configuration infrastructure is still limited at this stage, some tunings are not supported:
# the parser understands these keys and values, but there is currently nowhere to load them into.
#   threshold      - the ML infrastructure does not handle multiple thresholds.
#   fragmentation  - the ML infrastructure does not support per-collective fragmentation tuning.
##################################

# Defining collective section
[BARRIER]
# Defining message size section. We currently support small/large; more options may be added in the future.
# Barrier is a special case: it is the only collective that does not transfer any data, so only <small> is used.
<small>
# Since ML does not define any algorithm for BARRIER, we just use the default. An algorithm name for Barrier should be introduced later.
algorithm = ML_BARRIER_DEFAULT

# Hierarchy setup:
#
# full_hr               - all possible levels of hierarchy (the list of possible levels is given on the user command line)
# full_hr_no_basesocket - all possible levels of hierarchy, as above, except the basesocket subgroup
# ptp_only              - only the ptp hierarchy
# iboffload_only        - only the iboffload hierarchy
hierarchy = full_hr

[IBARRIER]
<small>
algorithm = ML_BARRIER_DEFAULT
hierarchy = full_hr

[BCAST]
<small>
# bcast supports: ML_BCAST_SMALL_DATA_KNOWN, ML_BCAST_SMALL_DATA_UNKNOWN, ML_BCAST_SMALL_DATA_SEQUENTIAL
algorithm = ML_BCAST_SMALL_DATA_KNOWN 
hierarchy = full_hr       
<large>
# bcast supports: ML_BCAST_LARGE_DATA_KNOWN, ML_BCAST_LARGE_DATA_UNKNOWN, ML_BCAST_LARGE_DATA_SEQUENTIAL
algorithm = ML_BCAST_LARGE_DATA_KNOWN
hierarchy = full_hr

[IBCAST]
<small>
algorithm = ML_BCAST_SMALL_DATA_KNOWN
hierarchy = full_hr       
<large>
algorithm = ML_BCAST_LARGE_DATA_KNOWN
hierarchy = full_hr

[GATHER]
<small>
# gather supports: ML_SMALL_DATA_GATHER
algorithm = ML_SMALL_DATA_GATHER
hierarchy = full_hr
<large>
# gather supports: ML_LARGE_DATA_GATHER
algorithm = ML_LARGE_DATA_GATHER
hierarchy = full_hr

[IGATHER]
<small>
# gather supports: ML_SMALL_DATA_GATHER
algorithm = ML_SMALL_DATA_GATHER
hierarchy = full_hr
<large>
# gather supports: ML_LARGE_DATA_GATHER
algorithm = ML_LARGE_DATA_GATHER
hierarchy = full_hr

[ALLGATHER]
<small>
# allgather supports: ML_SMALL_DATA_ALLGATHER
algorithm = ML_SMALL_DATA_ALLGATHER
hierarchy = full_hr
<large>
# allgather supports: ML_LARGE_DATA_ALLGATHER
algorithm = ML_LARGE_DATA_ALLGATHER
hierarchy = full_hr

[IALLGATHER]
<small>
# allgather supports: ML_SMALL_DATA_ALLGATHER
algorithm = ML_SMALL_DATA_ALLGATHER
hierarchy = full_hr
<large>
# allgather supports: ML_LARGE_DATA_ALLGATHER
algorithm = ML_LARGE_DATA_ALLGATHER
hierarchy = full_hr

[ALLTOALL]
<small>
# alltoall supports: ML_SMALL_DATA_ALLTOALL
algorithm = ML_SMALL_DATA_ALLTOALL
hierarchy = ptp_only
<large>
# alltoall supports: ML_LARGE_DATA_ALLTOALL
algorithm = ML_LARGE_DATA_ALLTOALL
hierarchy = ptp_only

[IALLTOALL]
<small>
# alltoall supports: ML_SMALL_DATA_ALLTOALL
algorithm = ML_SMALL_DATA_ALLTOALL
hierarchy = ptp_only
<large>
# alltoall supports: ML_LARGE_DATA_ALLTOALL
algorithm = ML_LARGE_DATA_ALLTOALL
hierarchy = ptp_only

[ALLREDUCE]
<small>
# allreduce supports: ML_SMALL_DATA_ALLREDUCE
algorithm = ML_SMALL_DATA_ALLREDUCE
hierarchy = full_hr
<large>
# allreduce supports: ML_LARGE_DATA_ALLREDUCE
algorithm = ML_LARGE_DATA_ALLREDUCE
hierarchy = full_hr

[IALLREDUCE]
<small>
# allreduce supports: ML_SMALL_DATA_ALLREDUCE
algorithm = ML_SMALL_DATA_ALLREDUCE
hierarchy = full_hr
<large>
# allreduce supports: ML_LARGE_DATA_ALLREDUCE
algorithm = ML_LARGE_DATA_ALLREDUCE
hierarchy = full_hr

[REDUCE]
<small>
# reduce supports: ML_SMALL_DATA_REDUCE
algorithm = ML_SMALL_DATA_REDUCE
hierarchy = full_hr
<large>
# reduce supports: ML_LARGE_DATA_REDUCE
algorithm = ML_LARGE_DATA_REDUCE
hierarchy = full_hr

[IREDUCE]
<small>
# reduce supports: ML_SMALL_DATA_REDUCE
algorithm = ML_SMALL_DATA_REDUCE
hierarchy = full_hr
<large>
# reduce supports: ML_LARGE_DATA_REDUCE
algorithm = ML_LARGE_DATA_REDUCE
hierarchy = full_hr

[SCATTER]
<small>
# scatter supports: ML_SCATTER_SMALL_DATA_SEQUENTIAL
algorithm = ML_SCATTER_SMALL_DATA_SEQUENTIAL
hierarchy = full_hr
<large>
# scatter supports: ML_SCATTER_SMALL_DATA_SEQUENTIAL
algorithm = ML_SCATTER_SMALL_DATA_SEQUENTIAL
hierarchy = full_hr

[ISCATTER]
<small>
# scatter supports: ML_SCATTER_SMALL_DATA_SEQUENTIAL
algorithm = ML_SCATTER_SMALL_DATA_SEQUENTIAL
hierarchy = full_hr
<large>
# scatter supports: ML_SCATTER_SMALL_DATA_SEQUENTIAL
algorithm = ML_SCATTER_SMALL_DATA_SEQUENTIAL
hierarchy = full_hr
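
A usage sketch, not part of the shipped file: to change the tuning for a single collective, copy this file, edit only the lines of interest, and point the coll/ml component at the copy (see the ompi_info / mpirun note near the top). For example, using only the keys and values documented above, a BCAST section restricted to the ptp-only hierarchy would read:

  [BCAST]
  <small>
  algorithm = ML_BCAST_SMALL_DATA_KNOWN
  hierarchy = ptp_only
  <large>
  algorithm = ML_BCAST_LARGE_DATA_KNOWN
  hierarchy = ptp_only

Whether the parser accepts a partial file containing only some sections is not established here, so editing a full copy of this file is the safer route.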