<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">

<html xmlns="http://www.w3.org/1999/xhtml" lang="en">
  <head>
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
    <title>Ganeti 2.0 design &#8212; Ganeti 2.16.0~rc2 documentation</title>
    <link rel="stylesheet" href="_static/style.css" type="text/css" />
    <link rel="stylesheet" href="_static/pygments.css" type="text/css" />
    <script type="text/javascript">
      var DOCUMENTATION_OPTIONS = {
        URL_ROOT:    './',
        VERSION:     '2.16.0~rc2',
        COLLAPSE_INDEX: false,
        FILE_SUFFIX: '.html',
        HAS_SOURCE:  true,
        SOURCELINK_SUFFIX: '.txt'
      };
    </script>
    <script type="text/javascript" src="_static/jquery.js"></script>
    <script type="text/javascript" src="_static/underscore.js"></script>
    <script type="text/javascript" src="_static/doctools.js"></script>
    <link rel="search" title="Search" href="search.html" />
    <link rel="next" title="Ganeti 2.1 design" href="design-2.1.html" />
    <link rel="prev" title="Welcome to Ganeti’s documentation!" href="index.html" /> 
  </head>
  <body>
    <div class="related" role="navigation" aria-label="related navigation">
      <h3>Navigation</h3>
      <ul>
        <li class="right" style="margin-right: 10px">
          <a href="design-2.1.html" title="Ganeti 2.1 design"
             accesskey="N">next</a></li>
        <li class="right" >
          <a href="index.html" title="Welcome to Ganeti’s documentation!"
             accesskey="P">previous</a> |</li>
        <li class="nav-item nav-item-0"><a href="index.html">Ganeti 2.16.0~rc2 documentation</a> &#187;</li> 
      </ul>
    </div>  

    <div class="document">
      <div class="documentwrapper">
        <div class="bodywrapper">
          <div class="body" role="main">
            
  <div class="section" id="ganeti-2-0-design">
<h1><a class="toc-backref" href="#id9">Ganeti 2.0 design</a><a class="headerlink" href="#ganeti-2-0-design" title="Permalink to this headline"></a></h1>
<p>This document describes the major changes in Ganeti 2.0 compared to
the 1.2 version.</p>
<p>The 2.0 version will constitute a rewrite of the ‘core’ architecture,
paving the way for additional features in future 2.x versions.</p>
<div class="contents topic" id="contents">
<p class="topic-title first">Contents</p>
<ul class="simple">
<li><a class="reference internal" href="#ganeti-2-0-design" id="id9">Ganeti 2.0 design</a><ul>
<li><a class="reference internal" href="#objective" id="id10">Objective</a></li>
<li><a class="reference internal" href="#background" id="id11">Background</a><ul>
<li><a class="reference internal" href="#scalability-problems" id="id12">Scalability problems</a></li>
<li><a class="reference internal" href="#artificial-restrictions" id="id13">Artificial restrictions</a></li>
<li><a class="reference internal" href="#architecture-issues" id="id14">Architecture issues</a></li>
</ul>
</li>
<li><a class="reference internal" href="#overview" id="id15">Overview</a></li>
<li><a class="reference internal" href="#detailed-design" id="id16">Detailed design</a><ul>
<li><a class="reference internal" href="#core-changes" id="id17">Core changes</a></li>
<li><a class="reference internal" href="#feature-changes" id="id18">Feature changes</a></li>
<li><a class="reference internal" href="#interface-changes" id="id19">Interface changes</a></li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
<div class="section" id="objective">
<h2><a class="toc-backref" href="#id10">Objective</a><a class="headerlink" href="#objective" title="Permalink to this headline"></a></h2>
<p>Ganeti 1.2 has many scalability issues and restrictions due to its
roots as software for managing small and ‘static’ clusters.</p>
<p>Version 2.0 will attempt to remedy first the scalability issues and
then the restrictions.</p>
</div>
<div class="section" id="background">
<h2><a class="toc-backref" href="#id11">Background</a><a class="headerlink" href="#background" title="Permalink to this headline"></a></h2>
<p>While Ganeti 1.2 is usable, it severely limits the flexibility of the
cluster administration and imposes a very rigid model. It has the
following main scalability issues:</p>
<ul class="simple">
<li>only one operation at a time on the cluster <a class="footnote-reference" href="#id2" id="id1">[1]</a></li>
<li>poor handling of node failures in the cluster</li>
<li>mixing hypervisors in a cluster not allowed</li>
</ul>
<p>It also has a number of artificial restrictions, due to historical
design:</p>
<ul class="simple">
<li>fixed number of disks (two) per instance</li>
<li>fixed number of NICs</li>
</ul>
<table class="docutils footnote" frame="void" id="id2" rules="none">
<colgroup><col class="label" /><col /></colgroup>
<tbody valign="top">
<tr><td class="label"><a class="fn-backref" href="#id1">[1]</a></td><td>Replace disks will release the lock, but this is an exception
and not a recommended way to operate</td></tr>
</tbody>
</table>
<p>The 2.0 version is intended to address some of these problems, and
create a more flexible code base for future developments.</p>
<p>Among these problems, the single-operation-at-a-time restriction is
the biggest issue with the current version of Ganeti. It is such a big
impediment in operating bigger clusters that one is often tempted
to remove the lock just to do a simple operation, like starting an
instance, while an OS installation is running.</p>
<div class="section" id="scalability-problems">
<h3><a class="toc-backref" href="#id12">Scalability problems</a><a class="headerlink" href="#scalability-problems" title="Permalink to this headline"></a></h3>
<p>Ganeti 1.2 has a single global lock, which is used for all cluster
operations.  This has been painful at various times, for example:</p>
<ul class="simple">
<li>It is impossible for two people to efficiently interact with a cluster
(for example for debugging) at the same time.</li>
<li>When batch jobs are running it’s impossible to do other work (for
example failovers/fixes) on a cluster.</li>
</ul>
<p>This poses scalability problems: as clusters grow in node and instance
size it’s a lot more likely that operations which one could conceive
should run in parallel (for example because they happen on different
nodes) are actually stalling each other while waiting for the global
lock, without a real reason for that to happen.</p>
<p>One of the main causes of this global lock (beside the higher
difficulty of ensuring data consistency in a more granular lock model)
is the fact that currently there is no long-lived process in Ganeti
that can coordinate multiple operations. Each command tries to acquire
the so called <em>cmd</em> lock and when it succeeds, it takes complete
ownership of the cluster configuration and state.</p>
<p>Other scalability problems are due to the design of the DRBD device
model, which assumed at its creation a low (one to four) number of
instances per node, which is no longer true with today’s hardware.</p>
</div>
<div class="section" id="artificial-restrictions">
<h3><a class="toc-backref" href="#id13">Artificial restrictions</a><a class="headerlink" href="#artificial-restrictions" title="Permalink to this headline"></a></h3>
<p>Ganeti 1.2 (and previous versions) have a fixed two-disk, one-NIC per
instance model. This is a purely artificial restriction, but it touches
so many areas (configuration, import/export, command line) that removing
it is better suited to a major release than a minor one.</p>
</div>
<div class="section" id="architecture-issues">
<h3><a class="toc-backref" href="#id14">Architecture issues</a><a class="headerlink" href="#architecture-issues" title="Permalink to this headline"></a></h3>
<p>The fact that each command is a separate process that reads the
cluster state, executes the command, and saves the new state is also
an issue on big clusters where the configuration data for the cluster
begins to be non-trivial in size.</p>
</div>
</div>
<div class="section" id="overview">
<h2><a class="toc-backref" href="#id15">Overview</a><a class="headerlink" href="#overview" title="Permalink to this headline"></a></h2>
<p>In order to solve the scalability problems, a rewrite of the core
design of Ganeti is required. While the cluster operations themselves
won’t change (e.g. start instance will do the same things), the way
these operations are scheduled internally will change radically.</p>
<p>The new design will change the cluster architecture to:</p>
<img src="_images/graphviz-ba7b7a25e90d87a8b465c444896e68826646535f.png" alt="digraph &quot;ganeti-2.0-architecture&quot; {
compound=false
concentrate=true
mclimit=100.0
nslimit=100.0
edge[fontsize=&quot;8&quot; fontname=&quot;Helvetica-Oblique&quot;]
node[width=&quot;0&quot; height=&quot;0&quot; fontsize=&quot;12&quot; fontcolor=&quot;black&quot; shape=rect]

subgraph outside {
  rclient[label=&quot;external clients&quot;]
  label=&quot;Outside the cluster&quot;
}

subgraph cluster_inside {
  label=&quot;ganeti cluster&quot;
  labeljust=l
  subgraph cluster_master_node {
    label=&quot;master node&quot;
    rapi[label=&quot;RAPI daemon&quot;]
    cli[label=&quot;CLI&quot;]
    watcher[label=&quot;Watcher&quot;]
    burnin[label=&quot;Burnin&quot;]
    masterd[shape=record style=filled label=&quot;{ &lt;luxi&gt; luxi endpoint | master I/O thread | job queue | {&lt;w1&gt; worker| &lt;w2&gt; worker | &lt;w3&gt; worker }}&quot;]
    {rapi;cli;watcher;burnin} -&gt; masterd:luxi [label=&quot;LUXI&quot; labelpos=100]
  }

  subgraph cluster_nodes {
      label=&quot;nodes&quot;
      noded1 [shape=record label=&quot;{ RPC listener | Disk management | Network management | Hypervisor } &quot;]
      noded2 [shape=record label=&quot;{ RPC listener | Disk management | Network management | Hypervisor } &quot;]
      noded3 [shape=record label=&quot;{ RPC listener | Disk management | Network management | Hypervisor } &quot;]
  }
  masterd:w2 -&gt; {noded1;noded2;noded3} [label=&quot;node RPC&quot;]
  cli -&gt; {noded1;noded2;noded3} [label=&quot;SSH&quot;]
}

rclient -&gt; rapi [label=&quot;RAPI protocol&quot;]
}" />
<p>This differs from the 1.2 architecture by the addition of the master
daemon, which will be the only entity to talk to the node daemons.</p>
</div>
<div class="section" id="detailed-design">
<h2><a class="toc-backref" href="#id16">Detailed design</a><a class="headerlink" href="#detailed-design" title="Permalink to this headline"></a></h2>
<p>The changes for 2.0 can be split into roughly three areas:</p>
<ul class="simple">
<li>core changes that affect the design of the software</li>
<li>features (or restriction removals) that do not have a wide
impact on the design</li>
<li>user-level and API-level changes which translate into differences for
the operation of the cluster</li>
</ul>
<div class="section" id="core-changes">
<h3><a class="toc-backref" href="#id17">Core changes</a><a class="headerlink" href="#core-changes" title="Permalink to this headline"></a></h3>
<p>The main change will be switching from a per-process model to a
daemon-based model, where the individual gnt-* commands will be
clients that talk to this daemon (see <a class="reference internal" href="#master-daemon">Master daemon</a>). This will
allow us to get rid of the global cluster lock for most operations,
having instead a per-object lock (see <a class="reference internal" href="#granular-locking">Granular locking</a>). Also, the
daemon will be able to queue jobs, and this will allow the individual
clients to submit jobs without waiting for them to finish, and also
see the result of old requests (see <a class="reference internal" href="#job-queue">Job Queue</a>).</p>
<p>Besides these major changes, another ‘core’ change, though less
visible to the users, will be changing the model of object attribute
storage and separating it into namespaces (such that a Xen PVM
instance will not have the Xen HVM parameters). This will allow future
flexibility in defining additional parameters. For more details see
<a class="reference internal" href="#object-parameters">Object parameters</a>.</p>
<p>The various changes brought in by the master daemon model and the
read-write RAPI will require changes to the cluster security; we move
away from Twisted and use HTTP(S) for intra- and extra-cluster
communications. For more details, see the security document in the
doc/ directory.</p>
<div class="section" id="master-daemon">
<h4>Master daemon<a class="headerlink" href="#master-daemon" title="Permalink to this headline"></a></h4>
<p>In Ganeti 2.0, we will have the following <em>entities</em>:</p>
<ul class="simple">
<li>the master daemon (on the master node)</li>
<li>the node daemon (on all nodes)</li>
<li>the command line tools (on the master node)</li>
<li>the RAPI daemon (on the master node)</li>
</ul>
<p>The master-daemon related interaction paths are:</p>
<ul class="simple">
<li>(CLI tools/RAPI daemon) and the master daemon, via the so called
<em>LUXI</em> API</li>
<li>the master daemon and the node daemons, via the node RPC</li>
</ul>
<p>There are also some additional interaction paths for exceptional cases:</p>
<ul class="simple">
<li>CLI tools might access the nodes via SSH (for <code class="docutils literal"><span class="pre">gnt-cluster</span> <span class="pre">copyfile</span></code>
and <code class="docutils literal"><span class="pre">gnt-cluster</span> <span class="pre">command</span></code>)</li>
<li>master failover is a special case, in which a non-master node will SSH
to and make node-RPC calls on the current master</li>
</ul>
<p>The protocol between the master daemon and the node daemons will be
changed from (Ganeti 1.2) Twisted PB (perspective broker) to HTTP(S),
using a simple PUT/GET of JSON-encoded messages. This is done due to
difficulties in working with the Twisted framework and its protocols
in a multithreaded environment, which we can overcome by using a
simpler stack (see the caveats section).</p>
<p>The protocol between the CLI/RAPI and the master daemon will be a
custom one (called <em>LUXI</em>): on a UNIX socket on the master node, with
rights restricted by filesystem permissions, the CLI/RAPI will talk to
the master daemon using JSON-encoded messages.</p>
<p>The operations supported over this internal protocol will be encoded
via a python library that will expose a simple API for its
users. Internally, the protocol will simply encode all objects in JSON
format and decode them on the receiver side.</p>
<p>For more details about the RAPI daemon see <a class="reference internal" href="#remote-api-changes">Remote API changes</a>, and
for the node daemon see <a class="reference internal" href="#node-daemon-changes">Node daemon changes</a>.</p>
<div class="section" id="the-luxi-protocol">
<span id="luxi"></span><h5>The LUXI protocol<a class="headerlink" href="#the-luxi-protocol" title="Permalink to this headline"></a></h5>
<p>As described above, the protocol for making requests or queries to the
master daemon will be a UNIX-socket based simple RPC of JSON-encoded
messages.</p>
<p>The choice of a UNIX socket was made in order to remove the need for
authentication and authorisation inside Ganeti; for 2.0, the
permissions on the Unix socket itself will determine the access
rights.</p>
<p>We will have two main classes of operations over this API:</p>
<ul class="simple">
<li>cluster query functions</li>
<li>job related functions</li>
</ul>
<p>The cluster query functions are usually short-duration, and are the
equivalent of the <code class="docutils literal"><span class="pre">OP_QUERY_*</span></code> opcodes in Ganeti 1.2 (and they are
still implemented internally with these opcodes). The clients are
guaranteed to receive the response in a reasonable time via a timeout.</p>
<p>The job-related functions will be:</p>
<ul class="simple">
<li>submit job</li>
<li>query job (which could also be categorized in the query-functions)</li>
<li>archive job (see the job queue design doc)</li>
<li>wait for job change, which allows a client to wait without polling</li>
</ul>
<p>For more details of the actual operation list, see the <a class="reference internal" href="#job-queue">Job Queue</a>.</p>
<p>Both requests and responses will consist of a JSON-encoded message
followed by the <code class="docutils literal"><span class="pre">ETX</span></code> character (ASCII decimal 3), which is not a
valid character in JSON messages and thus can serve as a message
delimiter. The contents of the messages will be a dictionary with two
fields:</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">method:</th><td class="field-body">the name of the method called</td>
</tr>
<tr class="field-even field"><th class="field-name">args:</th><td class="field-body">the arguments to the method, as a list (no keyword arguments allowed)</td>
</tr>
</tbody>
</table>
<p>Responses will follow the same format, with the two fields being:</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">success:</th><td class="field-body">a boolean denoting the success of the operation</td>
</tr>
<tr class="field-even field"><th class="field-name">result:</th><td class="field-body">the actual result, or error message in case of failure</td>
</tr>
</tbody>
</table>
<p>There are two special values for the result field:</p>
<ul class="simple">
<li>in the case that the operation failed, and this field is a list of
length two, the client library will try to interpret it as an
exception, the first element being the exception type and the second
one the actual exception arguments; this allows a simple method of
passing Ganeti-related exceptions across the interface</li>
<li>for the <em>WaitForChange</em> call (that waits on the server for a job to
change status), if the result is equal to <code class="docutils literal"><span class="pre">nochange</span></code> instead of the
usual result for this call (a list of changes), then the library will
internally retry the call; this is done in order to differentiate
internally between a hung master daemon and a job that simply has not
changed</li>
</ul>
<p>Users of the API that don’t use the provided python library should
handle the above two cases themselves.</p>
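<p>To make the framing concrete, the following is a minimal client-side
sketch of the wire format described above. It is illustrative only (the
error handling is simplified), and the provided python library takes
care of these details, including the two special result cases, for its
users:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>import json
import socket

ETX = chr(3)  # ASCII decimal 3, the message delimiter

def luxi_call(sock_path, method, args):
    # Requests are a JSON dictionary with &quot;method&quot; and &quot;args&quot;,
    # terminated by the ETX character
    request = json.dumps({&quot;method&quot;: method, &quot;args&quot;: args}) + ETX
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        sock.connect(sock_path)
        sock.sendall(request.encode(&quot;utf-8&quot;))
        buf = b&quot;&quot;
        while not buf.endswith(ETX.encode(&quot;utf-8&quot;)):
            chunk = sock.recv(4096)
            if not chunk:
                raise EOFError(&quot;connection closed mid-message&quot;)
            buf += chunk
    finally:
        sock.close()
    reply = json.loads(buf[:-1].decode(&quot;utf-8&quot;))
    if not reply[&quot;success&quot;]:
        result = reply[&quot;result&quot;]
        # A two-element list encodes (exception type, exception arguments)
        if isinstance(result, list) and len(result) == 2:
            raise RuntimeError(&quot;%s: %s&quot; % (result[0], result[1]))
        raise RuntimeError(result)
    return reply[&quot;result&quot;]
</pre></div>
</div>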
</div>
<div class="section" id="master-daemon-implementation">
<h5>Master daemon implementation<a class="headerlink" href="#master-daemon-implementation" title="Permalink to this headline"></a></h5>
<p>The daemon will be based around a main I/O thread that will wait for
new requests from the clients, and that does the setup/shutdown of the
other thread pools.</p>
<p>There will be two other classes of threads in the daemon:</p>
<ul class="simple">
<li>job processing threads, part of a thread pool, which are
long-lived, started at daemon startup and terminated only at shutdown
time</li>
<li>client I/O threads, which are the ones that talk the local protocol
(LUXI) to the clients, and are short-lived</li>
</ul>
</div>
<div class="section" id="master-startup-failover">
<h5>Master startup/failover<a class="headerlink" href="#master-startup-failover" title="Permalink to this headline"></a></h5>
<p>In Ganeti 1.x there is no protection against failing over the master
to a node with stale configuration. In effect, the responsibility for
correct failovers falls on the admin. This is true both for the new
master and for when an old, offline master starts up.</p>
<p>Since in 2.x we are extending the cluster state to cover the job queue
and have a daemon that will execute the job queue by itself, we want
to have more resilience for the master role.</p>
<p>The following algorithm will run whenever a node is ready to
transition to the master role, either at startup time or at node
failover (a sketch of the quorum check follows the list):</p>
<ol class="arabic">
<li><p class="first">read the configuration file and parse the node list
contained within</p>
</li>
<li><p class="first">query all the nodes and make sure we obtain an agreement via
a quorum of at least half plus one nodes for the following:</p>
<blockquote>
<div><ul class="simple">
<li>we have the latest configuration and job list (as
determined by the serial number on the configuration and
highest job ID on the job queue)</li>
<li>if we are not failing over (but just starting), the
quorum agrees that we are the designated master</li>
<li>if any of the above is false, we prevent the current operation
(i.e. we don’t become the master)</li>
</ul>
</div></blockquote>
</li>
<li><p class="first">at this point, the node transitions to the master role</p>
</li>
<li><p class="first">for all the in-progress jobs, mark them as failed, with
reason unknown or something similar (master failed, etc.)</p>
</li>
</ol>
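<p>A hedged sketch of step 2, the quorum check; the shape of the
per-node information is an assumption made for illustration:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>def confirm_master_role(my_name, node_info, my_serial, my_top_job_id,
                        failing_over):
    # node_info maps node name to a dict with &quot;serial&quot;, &quot;top_job_id&quot;
    # and &quot;master&quot; keys, or None for unreachable nodes (assumed shape)
    votes = 0
    for info in node_info.values():
        if info is None:
            continue  # unreachable nodes cannot confirm anything
        current = (info[&quot;serial&quot;] &lt;= my_serial and
                   info[&quot;top_job_id&quot;] &lt;= my_top_job_id)
        # When just starting (not failing over), the quorum must also
        # agree that we are the designated master
        agrees = failing_over or info[&quot;master&quot;] == my_name
        if current and agrees:
            votes += 1
    # We need an agreement of at least half plus one of all nodes
    return votes &gt;= len(node_info) // 2 + 1
</pre></div>
</div>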
<p>Since due to exceptional conditions we could have a situation in which
no node can become the master due to inconsistent data, we will have
an override switch for the master daemon startup that will assume the
current node has the right data and will replicate all the
configuration files to the other nodes.</p>
<p><strong>Note</strong>: the above algorithm is by no means an election algorithm; it
is a <em>confirmation</em> of the master role currently held by a node.</p>
</div>
<div class="section" id="logging">
<h5>Logging<a class="headerlink" href="#logging" title="Permalink to this headline"></a></h5>
<p>The logging system will be switched completely to the standard python
logging module; currently it’s logging-based, but exposes a different
API, which is just overhead. As such, the code will be switched over
to standard logging calls, and only the setup will be custom.</p>
<p>With this change, we will remove the separate debug/info/error logs,
and instead always have one logfile per daemon:</p>
<ul class="simple">
<li>master-daemon.log for the master daemon</li>
<li>node-daemon.log for the node daemon (this is the same as in 1.2)</li>
<li>rapi-daemon.log for the RAPI daemon logs</li>
<li>rapi-access.log, an additional log file for the RAPI that will be
in the standard HTTP log format for possible parsing by other tools</li>
</ul>
<p>Since the <a class="reference internal" href="glossary.html#term-watcher"><span class="xref std std-term">watcher</span></a> will only submit jobs to the master for
startup of the instances, its log file will contain less information
than before, mainly recording that it will start an instance, but not
the results.</p>
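<p>As an illustration, the custom setup part could be as small as the
following sketch (the log directory and format shown are assumptions):</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>import logging

def setup_logging(daemon_name, debug=False):
    # One logfile per daemon, e.g. master-daemon.log or node-daemon.log
    logging.basicConfig(
        filename=&quot;/var/log/ganeti/%s.log&quot; % daemon_name,
        level=logging.DEBUG if debug else logging.INFO,
        format=&quot;%(asctime)s %(levelname)s %(message)s&quot;)
</pre></div>
</div>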
</div>
<div class="section" id="node-daemon-changes">
<h5>Node daemon changes<a class="headerlink" href="#node-daemon-changes" title="Permalink to this headline"></a></h5>
<p>The only change to the node daemon is that, since we need better
concurrency, we don’t process the inter-node RPC calls in the node
daemon itself, but we fork and process each request in a separate
child.</p>
<p>Since we don’t have many calls, and we only fork (not exec), the
overhead should be minimal.</p>
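<p>A sketch of this fork-per-request model; the request handler is a
placeholder supplied by the caller:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>import os

def handle_request(conn, process_request):
    # Fork (no exec): the child serves one RPC call and then exits
    pid = os.fork()
    if pid == 0:
        try:
            process_request(conn)
        finally:
            os._exit(0)  # never fall back into the parent&#39;s loop
    conn.close()  # parent keeps listening; children are reaped elsewhere
</pre></div>
</div>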
</div>
<div class="section" id="caveats">
<h5>Caveats<a class="headerlink" href="#caveats" title="Permalink to this headline"></a></h5>
<p>A discussed alternative is to keep the current individual processes
touching the cluster configuration model. The reasons we have not
chosen this approach are:</p>
<ul class="simple">
<li>the cost of reading and deserializing the cluster state
today is not small enough that we can ignore it; the addition of
the job queue will make the startup cost even higher. While this
runtime cost seems low, it can be on the order of a few seconds on
bigger clusters, which for very quick commands is comparable to
the actual duration of the computation itself</li>
<li>individual commands would make it harder to implement a
fire-and-forget job request, along the lines of “start this
instance but do not wait for it to finish”; it would require a
model of backgrounding the operation and other things that are
much better served by a daemon-based model</li>
</ul>
<p>Another area of discussion is moving away from Twisted in this new
implementation. While Twisted has its advantages, there are also many
disadvantages to using it:</p>
<ul class="simple">
<li>first and foremost, it’s not a library, but a framework; thus, if
you use Twisted, all the code needs to be ‘twisted-ized’ and written
in an asynchronous manner, using deferreds; while this method works,
it’s not a common way to code and it requires that the entire process
workflow is based around a single <em>reactor</em> (the Twisted name for a main
loop)</li>
<li>the more advanced granular locking that we want to implement would
require, if written in the async manner, deep integration with the
Twisted stack, to such an extent that business logic is inseparable
from the protocol coding; we felt that this is an unreasonable
requirement, and that a good protocol library should allow complete
separation of low-level protocol calls and business logic; by
comparison, the threaded approach combined with the HTTP(S) protocol
required (for the first iteration) absolutely no changes from the 1.2
code, and later changes for optimizing the inter-node RPC calls
required just syntactic changes (e.g.  <code class="docutils literal"><span class="pre">rpc.call_...</span></code> to
<code class="docutils literal"><span class="pre">self.rpc.call_...</span></code>)</li>
</ul>
<p>Another issue is Twisted API stability: during the Ganeti
1.x lifetime, we had to implement workarounds many times for changes
in the Twisted version, so that for example 1.2 is able to use both
Twisted 2.x and 8.x.</p>
<p>In the end, since we already had an HTTP server library for the RAPI,
we just reused that for inter-node communication.</p>
</div>
</div>
<div class="section" id="granular-locking">
<h4>Granular locking<a class="headerlink" href="#granular-locking" title="Permalink to this headline"></a></h4>
<p>We want to make sure that multiple operations can run in parallel on a
Ganeti Cluster. In order for this to happen we need to make sure
concurrently run operations don’t step on each other’s toes and break the
cluster.</p>
<p>This design addresses how we are going to deal with locking so that:</p>
<ul class="simple">
<li>we preserve data coherency</li>
<li>we prevent deadlocks</li>
<li>we prevent job starvation</li>
</ul>
<p>Reaching the maximum possible parallelism is a Non-Goal. We have
identified a set of operations that are currently bottlenecks and need
to be parallelised and have worked on those. In the future it will be
possible to address other needs, thus making the cluster more and more
parallel one step at a time.</p>
<p>This section only talks about parallelising Ganeti level operations, aka
Logical Units, and the locking needed for that. Any other
synchronization lock needed internally by the code is outside its scope.</p>
<div class="section" id="library-details">
<h5>Library details<a class="headerlink" href="#library-details" title="Permalink to this headline"></a></h5>
<p>The proposed library has these features:</p>
<ul class="simple">
<li>internally managing all the locks, making the implementation
transparent to their users</li>
<li>automatically grabbing multiple locks in the right order (avoid
deadlock)</li>
<li>ability to transparently handle conversion to more granularity</li>
<li>support asynchronous operation (future goal)</li>
</ul>
<p>Locking will be valid only on the master node and will not be a
distributed operation. Therefore, in case of master failure, the
operations currently running will be aborted and the locks will be
lost; it remains for the administrator to clean up (if needed) the
operation result (e.g. make sure an instance is either installed
correctly or removed).</p>
<p>A corollary of this is that a master-failover operation with both
masters alive needs to happen while no operations are running, and
therefore no locks are held.</p>
<p>All the locks will be represented by objects (like
<code class="docutils literal"><span class="pre">lockings.SharedLock</span></code>), and the individual locks for each object
will be created at initialisation time, from the config file.</p>
<p>The API will have a way to grab one or more locks at the same
time.  Any attempt to grab a lock while already holding one in the wrong
order will be checked for, and will fail.</p>
</div>
<div class="section" id="the-locks">
<h5>The Locks<a class="headerlink" href="#the-locks" title="Permalink to this headline"></a></h5>
<p>At the first stage we have decided to provide the following locks:</p>
<ul class="simple">
<li>One “config file” lock</li>
<li>One lock per node in the cluster</li>
<li>One lock per instance in the cluster</li>
</ul>
<p>All the instance locks will need to be taken before the node locks, and
the node locks before the config lock. Locks will need to be acquired at
the same time for multiple instances and nodes, and the internal ordering
will be dealt with within the locking library, which, for simplicity, will
just use alphabetical order (a sketch follows at the end of this
section).</p>
<p>Each lock has the following three possible statuses:</p>
<ul class="simple">
<li>unlocked (anyone can grab the lock)</li>
<li>shared (anyone can grab/have the lock but only in shared mode)</li>
<li>exclusive (no one else can grab/have the lock)</li>
</ul>
</div>
<div class="section" id="handling-conversion-to-more-granularity">
<h5>Handling conversion to more granularity<a class="headerlink" href="#handling-conversion-to-more-granularity" title="Permalink to this headline"></a></h5>
<p>In order to convert to a more granular approach transparently, each time
we split a lock into more granular ones we’ll create a “metalock”, which
will depend on those sub-locks and live for the time necessary for all
the code to convert (or forever, in some conditions). When a metalock
exists all converted code must acquire it in shared mode, so it can run
concurrently, but still be exclusive with old code, which acquires it
exclusively.</p>
<p>In the beginning the only such lock will be what replaces the current
“command” lock, and will acquire all the locks in the system, before
proceeding. This lock will be called the “Big Ganeti Lock” because
holding that one will avoid any other concurrent Ganeti operations.</p>
<p>We might also want to devise more metalocks (eg. all nodes, all
nodes+config) in order to make it easier for some parts of the code to
acquire what they need without specifying it explicitly.</p>
<p>In the future things like the node locks could become metalocks, should
we decide to split them into an even more fine grained approach, but
this will probably be only after the first 2.0 version has been
released.</p>
</div>
<div class="section" id="adding-removing-locks">
<h5>Adding/Removing locks<a class="headerlink" href="#adding-removing-locks" title="Permalink to this headline"></a></h5>
<p>When a new instance or a new node is created an associated lock must be
added to the list. The relevant code will need to inform the locking
library of such a change.</p>
<p>This needs to be compatible with every other lock in the system,
especially metalocks that guarantee to grab sets of resources without
specifying them explicitly. The implementation of this will be handled
in the locking library itself.</p>
<p>When instances or nodes disappear from the cluster the relevant locks
must be removed. This is easier than adding new elements, as the code
which removes them must own them exclusively already, and thus deals
with metalocks exactly as normal code acquiring those locks. Any
operation queuing on a removed lock will fail after its removal.</p>
</div>
<div class="section" id="asynchronous-operations">
<h5>Asynchronous operations<a class="headerlink" href="#asynchronous-operations" title="Permalink to this headline"></a></h5>
<p>For the first version the locking library will only export synchronous
operations, which will block until the needed locks are held, and only
fail if the request is impossible or somehow erroneous.</p>
<p>In the future we may want to implement different types of asynchronous
operations such as:</p>
<ul class="simple">
<li>try to acquire this lock set and fail if not possible</li>
<li>try to acquire one of these lock sets and return the first one you
were able to get (or after a timeout) (select/poll like)</li>
</ul>
<p>These operations can be used to prioritize operations based on available
locks, rather than making them just blindly queue for acquiring them.
The inherent risk, though, is that any code using the first operation,
or setting a timeout for the second one, is susceptible to starvation
and thus may never be able to get the required locks and complete
certain tasks. Considering this, providing/using these operations should
not be among our first priorities.</p>
</div>
<div class="section" id="locking-granularity">
<h5>Locking granularity<a class="headerlink" href="#locking-granularity" title="Permalink to this headline"></a></h5>
<p>For the first version of this code we’ll convert each Logical Unit to
acquire/release the locks it needs, so locking will be at the Logical
Unit level.  In the future we may want to split logical units in
independent “tasklets” with their own locking requirements. A different
design doc (or mini design doc) will cover the move from Logical Units
to tasklets.</p>
</div>
<div class="section" id="code-examples">
<h5>Code examples<a class="headerlink" href="#code-examples" title="Permalink to this headline"></a></h5>
<p>In general when acquiring locks we should use a code path equivalent
to:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="n">lock</span><span class="o">.</span><span class="n">acquire</span><span class="p">()</span>
<span class="k">try</span><span class="p">:</span>
  <span class="o">...</span>
  <span class="c1"># other code</span>
<span class="k">finally</span><span class="p">:</span>
  <span class="n">lock</span><span class="o">.</span><span class="n">release</span><span class="p">()</span>
</pre></div>
</div>
<p>This makes sure we release all locks, and avoids possible deadlocks. Of
course extra care must be taken not to leave, if possible, locked
structures in an unusable state. Note that with Python 2.5 a simpler
syntax will be possible, but we want to keep compatibility with Python
2.4 so the new constructs should not be used.</p>
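<p>(For reference, the simpler Python 2.5 syntax alluded to is the
<code class="docutils literal"><span class="pre">with</span></code> statement, shown here only to make the comparison concrete:)</p>
<div class="highlight-default"><div class="highlight"><pre><span></span># Python 2.5+ only, hence ruled out while 2.4 must be supported:
with lock:
    pass  # other code; the lock is released automatically on exit
</pre></div>
</div>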
<p>In order to avoid this extra indentation and code changes everywhere in
the Logical Units code, we decided to allow LUs to declare locks, and
then execute their code with their locks acquired. In the new world LUs
are called like this:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="c1"># user passed names are expanded to the internal lock/resource name,</span>
<span class="c1"># then known needed locks are declared</span>
<span class="n">lu</span><span class="o">.</span><span class="n">ExpandNames</span><span class="p">()</span>
<span class="o">...</span> <span class="n">some</span> <span class="n">locking</span><span class="o">/</span><span class="n">adding</span> <span class="n">of</span> <span class="n">locks</span> <span class="n">may</span> <span class="n">happen</span> <span class="o">...</span>
<span class="c1"># late declaration of locks for one level: this is useful because sometimes</span>
<span class="c1"># we can&#39;t know which resource we need before locking the previous level</span>
<span class="n">lu</span><span class="o">.</span><span class="n">DeclareLocks</span><span class="p">()</span> <span class="c1"># for each level (cluster, instance, node)</span>
<span class="o">...</span> <span class="n">more</span> <span class="n">locking</span><span class="o">/</span><span class="n">adding</span> <span class="n">of</span> <span class="n">locks</span> <span class="n">can</span> <span class="n">happen</span> <span class="o">...</span>
<span class="c1"># these functions are called with the proper locks held</span>
<span class="n">lu</span><span class="o">.</span><span class="n">CheckPrereq</span><span class="p">()</span>
<span class="n">lu</span><span class="o">.</span><span class="n">Exec</span><span class="p">()</span>
<span class="o">...</span> <span class="n">locks</span> <span class="n">declared</span> <span class="k">for</span> <span class="n">removal</span> <span class="n">are</span> <span class="n">removed</span><span class="p">,</span> <span class="nb">all</span> <span class="n">acquired</span> <span class="n">locks</span> <span class="n">released</span> <span class="o">...</span>
</pre></div>
</div>
<p>The Processor and the LogicalUnit class will contain exact documentation
on how locks are supposed to be declared.</p>
</div>
<div class="section" id="id3">
<h5>Caveats<a class="headerlink" href="#id3" title="Permalink to this headline"></a></h5>
<p>This library will provide an easy upgrade path to bring all the code to
granular locking without breaking everything, and it will also guard
against a lot of common errors. Code switching from the old “lock
everything” lock to the new system, though, needs to be carefully
scrutinised to be sure it is really acquiring all the necessary locks,
and none has been overlooked or forgotten.</p>
<p>The code can contain other locks outside of this library, to synchronise
other threaded code (e.g. for the job queue), but in general these should
be leaf locks or carefully structured non-leaf ones, to avoid deadlock
race conditions.</p>
</div>
</div>
<div class="section" id="job-queue">
<span id="jqueue-original-design"></span><h4>Job Queue<a class="headerlink" href="#job-queue" title="Permalink to this headline"></a></h4>
<p>Granular locking is not enough to speed up operations; we also need a
queue to store them and to be able to process as many as possible in
parallel.</p>
<p>A Ganeti job will consist of multiple <code class="docutils literal"><span class="pre">OpCodes</span></code>, which are the basic
elements of operation in Ganeti 1.2 (and will remain as such). Most
commands are equivalent to one OpCode, or in some cases
to a sequence of opcodes, all of the same type (e.g. evacuating a node
will generate N opcodes of type replace disks).</p>
<div class="section" id="job-executionlife-of-a-ganeti-job">
<h5>Job execution—“Life of a Ganeti job”<a class="headerlink" href="#job-executionlife-of-a-ganeti-job" title="Permalink to this headline"></a></h5>
<ol class="arabic simple">
<li>Job gets submitted by the client. A new job identifier is generated
and assigned to the job. The job is then automatically replicated
<a class="footnote-reference" href="#replic" id="id4">[2]</a> to all nodes in the cluster. The identifier is returned to
the client.</li>
<li>A pool of worker threads waits for new jobs (see the sketch at the
end of this section). If all workers are busy, the job has to wait, and
the first worker to finish its work will grab it; otherwise any of the
waiting threads will pick up the new job.</li>
<li>Client waits for job status updates by calling a waiting RPC
function. Log messages may be shown to the user. Until the job is
started, it can also be canceled.</li>
<li>As soon as the job is finished, its final result and status can be
retrieved from the server.</li>
<li>If the client archives the job, it gets moved to a history directory.
There will be a method to archive all jobs older than a given age.</li>
</ol>
<table class="docutils footnote" frame="void" id="replic" rules="none">
<colgroup><col class="label" /><col /></colgroup>
<tbody valign="top">
<tr><td class="label"><a class="fn-backref" href="#id4">[2]</a></td><td>We need replication in order to maintain the consistency
across all nodes in the system; the master node only differs in the
fact that now it is running the master daemon, but it if fails and we
do a master failover, the jobs are still visible on the new master
(though marked as failed).</td></tr>
</tbody>
</table>
<p>Failures to replicate a job to other nodes will only be flagged as
errors in the master daemon log if more than half of the nodes failed;
otherwise we ignore the failure, and rely on the fact that the next
update (for still running jobs) will retry the update. For finished
jobs, it is less of a problem.</p>
<p>Future improvements will look into checking the consistency of the job
list and jobs themselves at master daemon startup.</p>
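<p>A hedged sketch of the worker pool from step 2: long-lived threads
block on a shared queue (e.g. a <code class="docutils literal"><span class="pre">queue.Queue</span></code>) and execute jobs as
they arrive; the job object and its <code class="docutils literal"><span class="pre">run()</span></code> method are assumptions:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>import threading

def _worker(job_queue):
    while True:
        job = job_queue.get()  # blocks until a job is available
        try:
            job.run()  # assumed: runs the job&#39;s opcodes in order
        finally:
            job_queue.task_done()

def start_worker_pool(job_queue, size=4):
    # Long-lived threads, started at daemon startup (step 2 above)
    for _ in range(size):
        thread = threading.Thread(target=_worker, args=(job_queue,))
        thread.daemon = True  # terminated only at daemon shutdown
        thread.start()
</pre></div>
</div>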
</div>
<div class="section" id="job-storage">
<h5>Job storage<a class="headerlink" href="#job-storage" title="Permalink to this headline"></a></h5>
<p>Jobs are stored in the filesystem as individual files, serialized
using JSON (the standard serialization mechanism in Ganeti).</p>
<p>The choice of storing each job in its own file was made because:</p>
<ul class="simple">
<li>a file can be atomically replaced</li>
<li>a file can easily be replicated to other nodes</li>
<li>checking consistency across nodes can be implemented very easily,
since all job files should be (at a given moment in time) identical</li>
</ul>
<p>The other possible choices that were discussed and discounted were:</p>
<ul class="simple">
<li>single big file with all job data: not feasible due to difficult
updates</li>
<li>in-process databases: hard to replicate the entire database to the
other nodes, and replicating individual operations does not mean we
keep consistency</li>
</ul>
</div>
<div class="section" id="queue-structure">
<h5>Queue structure<a class="headerlink" href="#queue-structure" title="Permalink to this headline"></a></h5>
<p>All file operations have to be done atomically by writing to a temporary
file and subsequent renaming. Except for log messages, every change in a
job is stored and replicated to other nodes.</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>/var/lib/ganeti/queue/
  job-1 (JSON encoded job description and status)
  […]
  job-37
  job-38
  job-39
  lock (Queue managing process opens this file in exclusive mode)
  serial (Last job ID used)
  version (Queue format version)
</pre></div>
</div>
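<p>The atomic-replacement rule can be illustrated with a short sketch
(error cleanup of the temporary file is omitted for brevity):</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>import os
import tempfile

def atomic_write(path, data):
    # Write to a temporary file in the same directory, then rename():
    # the rename atomically replaces the target on POSIX filesystems
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path))
    try:
        os.write(fd, data)
        os.fsync(fd)
    finally:
        os.close(fd)
    os.rename(tmp, path)
</pre></div>
</div>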
</div>
<div class="section" id="locking">
<h5>Locking<a class="headerlink" href="#locking" title="Permalink to this headline"></a></h5>
<p>Locking in the job queue is a complicated topic. It is called from more
than one thread and must be thread-safe. For simplicity, a single lock
is used for the whole job queue.</p>
<p>A more detailed description can be found in doc/locking.rst.</p>
</div>
<div class="section" id="internal-rpc">
<h5>Internal RPC<a class="headerlink" href="#internal-rpc" title="Permalink to this headline"></a></h5>
<p>RPC calls available between Ganeti master and node daemons:</p>
<dl class="docutils">
<dt>jobqueue_update(file_name, content)</dt>
<dd>Writes a file in the job queue directory.</dd>
<dt>jobqueue_purge()</dt>
<dd>Cleans the job queue directory completely, including archived jobs.</dd>
<dt>jobqueue_rename(old, new)</dt>
<dd>Renames a file in the job queue directory.</dd>
</dl>
</div>
<div class="section" id="client-rpc">
<h5>Client RPC<a class="headerlink" href="#client-rpc" title="Permalink to this headline"></a></h5>
<p>RPC between Ganeti clients and the Ganeti master daemon supports the
following operations:</p>
<dl class="docutils">
<dt>SubmitJob(ops)</dt>
<dd>Submits a list of opcodes and returns the job identifier. The
identifier is guaranteed to be unique during the lifetime of a
cluster.</dd>
<dt>WaitForJobChange(job_id, fields, […], timeout)</dt>
<dd>This function waits until a job changes or a timeout expires. The
condition for when a job changed is defined by the fields passed and
the last log message received.</dd>
<dt>QueryJobs(job_ids, fields)</dt>
<dd>Returns field values for the job identifiers passed.</dd>
<dt>CancelJob(job_id)</dt>
<dd>Cancels the job specified by identifier. This operation may fail if
the job is already running, canceled or finished.</dd>
<dt>ArchiveJob(job_id)</dt>
<dd>Moves a job into the …/archive/ directory. This operation will fail if
the job has not been canceled or finished.</dd>
</dl>
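<p>A sketch of a typical client flow over these calls, reusing the
illustrative <code class="docutils literal"><span class="pre">luxi_call</span></code> helper from the LUXI section; the socket
path and the reply shape are assumptions:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>SOCK = &quot;/var/run/ganeti/master.sock&quot;  # assumed socket location

def run_job(ops):
    # Submit the opcodes, then wait until the job reaches a final status
    job_id = luxi_call(SOCK, &quot;SubmitJob&quot;, [ops])
    while True:
        reply = luxi_call(SOCK, &quot;WaitForJobChange&quot;,
                          [job_id, [&quot;status&quot;], [], 60])
        if reply == &quot;nochange&quot;:
            continue  # per the LUXI section: retry, the job did not change
        status = reply[0][0]  # assumed shape: (field values, log entries)
        if status in (&quot;Success&quot;, &quot;Error&quot;, &quot;Canceled&quot;):
            return status
</pre></div>
</div>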
</div>
<div class="section" id="job-and-opcode-status">
<h5>Job and opcode status<a class="headerlink" href="#job-and-opcode-status" title="Permalink to this headline"></a></h5>
<p>Each job and each opcode has, at any time, one of the following states:</p>
<dl class="docutils">
<dt>Queued</dt>
<dd>The job/opcode was submitted, but did not yet start.</dd>
<dt>Waiting</dt>
<dd>The job/opcode is waiting for a lock to proceed.</dd>
<dt>Running</dt>
<dd>The job/opcode is running.</dd>
<dt>Canceled</dt>
<dd>The job/opcode was canceled before it started.</dd>
<dt>Success</dt>
<dd>The job/opcode ran and finished successfully.</dd>
<dt>Error</dt>
<dd>The job/opcode was aborted with an error.</dd>
</dl>
<p>If the master is aborted while a job is running, the job will be set to
the Error status once the master starts again.</p>
</div>
<div class="section" id="history">
<h5>History<a class="headerlink" href="#history" title="Permalink to this headline"></a></h5>
<p>Archived jobs are kept in a separate directory,
<code class="docutils literal"><span class="pre">/var/lib/ganeti/queue/archive/</span></code>.  This is done in order to speed up
the queue handling: by default, the jobs in the archive are not
touched by any functions. Only the current (unarchived) jobs are
parsed, loaded, and verified (if implemented) by the master daemon.</p>
</div>
<div class="section" id="ganeti-updates">
<h5>Ganeti updates<a class="headerlink" href="#ganeti-updates" title="Permalink to this headline"></a></h5>
<p>The queue has to be completely empty for Ganeti updates with changes
in the job queue structure. In order to allow this, there will be a
way to prevent new jobs from entering the queue.</p>
</div>
</div>
<div class="section" id="object-parameters">
<h4>Object parameters<a class="headerlink" href="#object-parameters" title="Permalink to this headline"></a></h4>
<p>Across all cluster configuration data, we have multiple classes of
parameters:</p>
<ol class="upperalpha simple">
<li>cluster-wide parameters (e.g. name of the cluster, the master);
these are the ones that we have today, and are unchanged from the
current model</li>
<li>node parameters</li>
<li>instance specific parameters, e.g. the name of disks (LV), that
cannot be shared with other instances</li>
<li>instance parameters, that are or can be the same for many
instances, but are not hypervisor related; e.g. the number of VCPUs,
or the size of memory</li>
<li>instance parameters that are hypervisor specific (e.g. kernel_path
or PAE mode)</li>
</ol>
<p>The following definitions for instance parameters will be used below:</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name" colspan="2">hypervisor parameter:</th></tr>
<tr class="field-odd field"><td>&#160;</td><td class="field-body"><p class="first">a hypervisor parameter (or hypervisor specific parameter) is defined
as a parameter that is interpreted by the hypervisor support code in
Ganeti and usually is specific to a particular hypervisor (like the
kernel path for <a class="reference internal" href="glossary.html#term-pvm"><span class="xref std std-term">PVM</span></a> which makes no sense for <a class="reference internal" href="glossary.html#term-hvm"><span class="xref std std-term">HVM</span></a>).</p>
</td>
</tr>
<tr class="field-even field"><th class="field-name" colspan="2">backend parameter:</th></tr>
<tr class="field-even field"><td>&#160;</td><td class="field-body"><p class="first">a backend parameter is defined as an instance parameter that can be
shared among a list of instances, and is either generic enough not
to be tied to a given hypervisor or cannot influence at all the
hypervisor behaviour.</p>
<p>For example: memory, vcpus, auto_balance</p>
<p>All these parameters will be encoded into constants.py with the prefix
“BE_” and the whole list of parameters will exist in the set
“BES_PARAMETERS”</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name" colspan="2">proper parameter:</th></tr>
<tr class="field-odd field"><td>&#160;</td><td class="field-body"><p class="first last">a parameter whose value is unique to the instance (e.g. the name of a
LV, or the MAC of a NIC)</p>
</td>
</tr>
</tbody>
</table>
<p>As a general rule, for all kinds of parameters, “None” (or in
JSON-speak, “null”) will no longer be a valid value for a parameter. As
such, only non-default parameters will be saved as part of objects in
the serialization step, reducing the size of the serialized format.</p>
<div class="section" id="cluster-parameters">
<h5>Cluster parameters<a class="headerlink" href="#cluster-parameters" title="Permalink to this headline"></a></h5>
<p>Cluster parameters remain as today, attributes at the top level of the
Cluster object. In addition, two new attributes at this level will
hold defaults for the instances:</p>
<ul class="simple">
<li>hvparams, a dictionary indexed by hypervisor type, holding default
values for hypervisor parameters that are not defined/overridden by
the instances of this hypervisor type</li>
<li>beparams, a dictionary holding (for 2.0) a single element ‘default’,
which holds the default value for backend parameters</li>
</ul>
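<p>For illustration, the two attributes might hold data shaped like this
(the concrete parameter values are invented for this example):</p>
<div class="highlight-default"><div class="highlight"><pre># cluster-level defaults, per hypervisor type
hvparams = {
    "xen-pvm": {"kernel_path": "/boot/vmlinuz-2.6-xenU"},
    "xen-hvm": {"boot_order": "cd"},
}
# for 2.0 a single "default" entry holds the backend defaults
beparams = {
    "default": {"memory": 512, "vcpus": 1, "auto_balance": True},
}
</pre></div>
</div>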
</div>
<div class="section" id="node-parameters">
<h5>Node parameters<a class="headerlink" href="#node-parameters" title="Permalink to this headline"></a></h5>
<p>Node-related parameters are very few, and we will continue using the
same model for these as previously (attributes on the Node object).</p>
<p>There are three new node flags, described in a separate section “node
flags” below.</p>
</div>
<div class="section" id="instance-parameters">
<h5>Instance parameters<a class="headerlink" href="#instance-parameters" title="Permalink to this headline"></a></h5>
<p>As described before, the instance parameters are split in three:
instance proper parameters, unique to each instance, instance
hypervisor parameters and instance backend parameters.</p>
<p>The “hvparams” and “beparams” are kept in two dictionaries at instance
level. Only non-default parameters are stored (but once customized, a
parameter will be kept, even with the same value as the default one,
until reset).</p>
<p>The names for hypervisor parameters in the instance.hvparams subtree
should be chosen to be as generic as possible, especially if specific
parameters could conceivably be useful for more than one hypervisor,
e.g. <code class="docutils literal"><span class="pre">instance.hvparams.vnc_console_port</span></code> instead of using both
<code class="docutils literal"><span class="pre">instance.hvparams.hvm_vnc_console_port</span></code> and
<code class="docutils literal"><span class="pre">instance.hvparams.kvm_vnc_console_port</span></code>.</p>
<p>There are some special cases related to disks and NICs: for example,
a disk has both Ganeti-related parameters (e.g. the name of the LV)
and hypervisor-related parameters (how the disk is presented to/named
in the instance). The former remain proper instance parameters, while
the latter are migrated to the hvparams structure. In 2.0, such
hypervisor parameters will exist only globally per instance, not
per device (e.g. all NICs will be exported as being of the same
type).</p>
<p>Starting from the 1.2 list of instance parameters, here is how they
will be mapped to the three classes of parameters:</p>
<ul class="simple">
<li>name (P)</li>
<li>primary_node (P)</li>
<li>os (P)</li>
<li>hypervisor (P)</li>
<li>status (P)</li>
<li>memory (BE)</li>
<li>vcpus (BE)</li>
<li>nics (P)</li>
<li>disks (P)</li>
<li>disk_template (P)</li>
<li>network_port (P)</li>
<li>kernel_path (HV)</li>
<li>initrd_path (HV)</li>
<li>hvm_boot_order (HV)</li>
<li>hvm_acpi (HV)</li>
<li>hvm_pae (HV)</li>
<li>hvm_cdrom_image_path (HV)</li>
<li>hvm_nic_type (HV)</li>
<li>hvm_disk_type (HV)</li>
<li>vnc_bind_address (HV)</li>
<li>serial_no (P)</li>
</ul>
</div>
<div class="section" id="parameter-validation">
<h5>Parameter validation<a class="headerlink" href="#parameter-validation" title="Permalink to this headline"></a></h5>
<p>To support the new cluster parameter design, additional features will
be required from the hypervisor support implementations in Ganeti.</p>
<p>The hypervisor support  implementation API will be extended with the
following features:</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">PARAMETERS:</th><td class="field-body">class-level attribute holding the list of valid parameters
for this hypervisor</td>
</tr>
<tr class="field-even field"><th class="field-name" colspan="2">CheckParamSyntax(hvparams):</th></tr>
<tr class="field-even field"><td>&#160;</td><td class="field-body">checks that the given parameters are
valid (as in the names are valid) for this hypervisor; usually just
comparing <code class="docutils literal"><span class="pre">hvparams.keys()</span></code> and <code class="docutils literal"><span class="pre">cls.PARAMETERS</span></code>; this is a class
method that can be called from within master code (i.e. cmdlib) and
should be safe to do so</td>
</tr>
<tr class="field-odd field"><th class="field-name" colspan="2">ValidateParameters(hvparams):</th></tr>
<tr class="field-odd field"><td>&#160;</td><td class="field-body">verifies the values of the provided
parameters against this hypervisor; this is a method that will be
called on the target node, from backend.py code, and as such can
make node-specific checks (e.g. kernel_path checking)</td>
</tr>
</tbody>
</table>
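<p>Under these definitions, a hypervisor support class might be extended
roughly as follows (class name, error type and the concrete checks are
illustrative only):</p>
<div class="highlight-default"><div class="highlight"><pre>import os

class ExampleHypervisor:
    # class-level attribute with the valid parameter names
    PARAMETERS = ["kernel_path", "initrd_path"]

    @classmethod
    def CheckParamSyntax(cls, hvparams):
        """Name-level check; safe to call from master code (cmdlib)."""
        invalid = set(hvparams.keys()) - set(cls.PARAMETERS)
        if invalid:
            raise ValueError("Invalid hypervisor parameters: %s" %
                             ", ".join(sorted(invalid)))

    def ValidateParameters(self, hvparams):
        """Value-level check; runs on the target node (backend.py)."""
        kernel = hvparams.get("kernel_path")
        if kernel and not os.path.isfile(kernel):
            raise ValueError("Kernel %s not found on this node" % kernel)
</pre></div>
</div>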
</div>
<div class="section" id="default-value-application">
<h5>Default value application<a class="headerlink" href="#default-value-application" title="Permalink to this headline"></a></h5>
<p>The application of defaults to an instance is done in the Cluster
object, via two new methods as follows:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">Cluster.FillHV(instance)</span></code>, returns ‘filled’ hvparams dict, based on
instance’s hvparams and cluster’s <code class="docutils literal"><span class="pre">hvparams[instance.hypervisor]</span></code></li>
<li><code class="docutils literal"><span class="pre">Cluster.FillBE(instance,</span> <span class="pre">be_type=&quot;default&quot;)</span></code>, which returns the
beparams dict, based on the instance and cluster beparams</li>
</ul>
<p>The FillHV/BE transformations will be used, for example, in the
RpcRunner when sending an instance for activation/stop, and the sent
instance hvparams/beparams will have the final value (noded code doesn’t
know about defaults).</p>
<p>LU code will need to apply the transformation itself, where needed.</p>
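<p>A minimal sketch of the two methods, assuming plain dictionaries for
the parameter containers:</p>
<div class="highlight-default"><div class="highlight"><pre>class Cluster:
    # self.hvparams: defaults per hypervisor type
    # self.beparams: for 2.0, a single "default" entry

    def FillHV(self, instance):
        """Cluster defaults, overridden by instance-level hvparams."""
        filled = dict(self.hvparams.get(instance.hypervisor, {}))
        filled.update(instance.hvparams)  # instance values win
        return filled

    def FillBE(self, instance, be_type="default"):
        """The same mechanism for the backend parameters."""
        filled = dict(self.beparams.get(be_type, {}))
        filled.update(instance.beparams)
        return filled
</pre></div>
</div>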
</div>
<div class="section" id="opcode-changes">
<h5>Opcode changes<a class="headerlink" href="#opcode-changes" title="Permalink to this headline"></a></h5>
<p>The parameter changes will have impact on the OpCodes, especially on
the following ones:</p>
<ul class="simple">
<li><code class="docutils literal"><span class="pre">OpInstanceCreate</span></code>, where the new hv and be parameters will be sent
as dictionaries; note that all hv and be parameters are now optional,
as the values can be instead taken from the cluster</li>
<li><code class="docutils literal"><span class="pre">OpInstanceQuery</span></code>, where we have to be able to query these new
parameters; the syntax for names will be <code class="docutils literal"><span class="pre">hvparam/$NAME</span></code> and
<code class="docutils literal"><span class="pre">beparam/$NAME</span></code> for querying an individual parameter out of one
dictionary, and <code class="docutils literal"><span class="pre">hvparams</span></code>, respectively <code class="docutils literal"><span class="pre">beparams</span></code>, for the whole
dictionaries</li>
<li><code class="docutils literal"><span class="pre">OpModifyInstance</span></code>, where the the modified parameters are sent as
dictionaries</li>
</ul>
<p>Additionally, we will need new OpCodes to modify the cluster-level
defaults for the be/hv sets of parameters.</p>
</div>
<div class="section" id="id5">
<h5>Caveats<a class="headerlink" href="#id5" title="Permalink to this headline"></a></h5>
<p>One problem that might appear is that our classification is not
complete or not good enough, and we’ll need to change this model. As
a last resort, we would need to roll back and keep the 1.2 style.</p>
<p>Another problem is that the classification of some parameters is
unclear (e.g. <code class="docutils literal"><span class="pre">network_port</span></code>: is this BE or HV?); in such cases we’ll
take the risk of having to move parameters between classes later.</p>
</div>
<div class="section" id="security">
<h5>Security<a class="headerlink" href="#security" title="Permalink to this headline"></a></h5>
<p>The only security issue that we foresee is some new parameters having
sensitive values. If so, we will need a way to export the config data
while purging the sensitive values.</p>
<p>E.g. for the DRBD shared secrets, we could export these with the
values replaced by an empty string.</p>
</div>
</div>
<div class="section" id="node-flags">
<h4>Node flags<a class="headerlink" href="#node-flags" title="Permalink to this headline"></a></h4>
<p>Ganeti 2.0 adds three node flags that change the way nodes are handled
within Ganeti and the related infrastructure (iallocator interaction,
RAPI data export).</p>
<div class="section" id="master-candidate-flag">
<h5><em>master candidate</em> flag<a class="headerlink" href="#master-candidate-flag" title="Permalink to this headline"></a></h5>
<p>Ganeti 2.0 allows more scalability in operation by introducing
parallelization. However, a new bottleneck is reached that is the
synchronization and replication of cluster configuration to all nodes
in the cluster.</p>
<p>This breaks scalability, as the speed of the replication decreases
roughly with the number of nodes in the cluster. The goal of the
master candidate flag is to change this O(n) into O(1) with respect to
job and configuration data propagation.</p>
<p>Only nodes having this flag set (let’s call this set of nodes the
<em>candidate pool</em>) will have jobs and configuration data replicated.</p>
<p>The cluster will have a new parameter (runtime changeable) called
<code class="docutils literal"><span class="pre">candidate_pool_size</span></code> which represents the number of candidates the
cluster tries to maintain (preferably automatically).</p>
<p>This will impact the cluster operations as follows:</p>
<ul class="simple">
<li>jobs and config data will be replicated only to a fixed set of nodes</li>
<li>master fail-over will only be possible to a node in the candidate pool</li>
<li>cluster verify needs changing to account for these two roles</li>
<li>external scripts will no longer have access to the configuration
file (this is not recommended anyway)</li>
</ul>
<p>The caveats of this change are:</p>
<ul class="simple">
<li>if all candidates are lost (completely), cluster configuration is
lost (but it should be backed up external to the cluster anyway)</li>
<li>failed nodes which are candidate must be dealt with properly, so
that we don’t lose too many candidates at the same time; this will be
reported in cluster verify</li>
<li>the ‘all equal’ concept of ganeti is no longer true</li>
<li>the partial distribution of config data means that all nodes will
have to revert to ssconf files for master info (as in 1.2)</li>
</ul>
<p>Advantages:</p>
<ul class="simple">
<li>speed on a simulated cluster of 100+ nodes is greatly enhanced, even
for a simple operation; <code class="docutils literal"><span class="pre">gnt-instance</span> <span class="pre">remove</span></code> on a diskless instance
goes from ~9 seconds to ~2 seconds</li>
<li>node failure of non-candidates will be less impacting on the cluster</li>
</ul>
<p>The default value for the candidate pool size will be set to 10, but
this can be changed at cluster creation and modified any time later.</p>
<p>Testing on big simulated clusters with sequential and parallel jobs
shows that this value (10) is a sweet spot from a performance and load
point of view.</p>
</div>
<div class="section" id="offline-flag">
<h5><em>offline</em> flag<a class="headerlink" href="#offline-flag" title="Permalink to this headline"></a></h5>
<p>In order to better support situations in which nodes are offline
(e.g. for repair) without altering the cluster configuration, Ganeti
needs to be told about this state and needs to handle it properly.</p>
<p>This will result in simpler procedures, and less mistakes, when the
amount of node failures is high on an absolute scale (either due to
high failure rate or simply big clusters).</p>
<p>Nodes having this attribute set will not be contacted for inter-node
RPC calls, will not be master candidates, and will not be able to host
instances as primaries.</p>
<p>Setting this attribute on a node:</p>
<ul class="simple">
<li>will not be allowed if the node is the master</li>
<li>will not be allowed if the node has primary instances</li>
<li>will cause the node to be demoted from the master candidate role (if
it was), possibly causing another node to be promoted to that role</li>
</ul>
<p>This attribute will impact the cluster operations as follows:</p>
<ul class="simple">
<li>querying these nodes for anything will fail instantly in the RPC
library, with a specific RPC error (RpcResult.offline == True)</li>
<li>they will be listed in the Other section of cluster verify</li>
</ul>
<p>The code is changed in the following ways:</p>
<ul class="simple">
<li>RPC calls will be converted to skip such nodes:<ul>
<li>RpcRunner-instance-based RPC calls are easy to convert</li>
<li>static/classmethod RPC calls are harder to convert, and will be left
alone</li>
</ul>
</li>
<li>the RPC results will be unified so that this new result state (offline)
can be differentiated</li>
<li>master voting still queries in-repair nodes, as we need to ensure
consistency in case the (wrong) masters have old data, and nodes have
come back from repairs</li>
</ul>
<p>Caveats:</p>
<ul class="simple">
<li>some operation semantics are less clear (e.g. what to do on instance
start with offline secondary?); for now, these will just fail as if
the flag is not set (but faster)</li>
<li>2-node cluster with one node offline needs manual startup of the
master with a special flag to skip voting (as the master can’t get a
quorum there)</li>
</ul>
<p>One of the advantages of implementing this flag is that it will allow
in the future automation tools to automatically put the node in
repairs and recover from this state, and the code (should/will) handle
this much better than just timing out. So, future possible
improvements (for later versions):</p>
<ul class="simple">
<li>the watcher will detect nodes which fail RPC calls and will attempt
to ssh to them; on failure, it will put them offline</li>
<li>the watcher will try to ssh to and query the offline nodes; if
successful, it will take them off the repair list</li>
</ul>
<p>Alternatives considered: The RPC call model in 2.0 is, by default,
much nicer - errors are logged in the background, and job/opcode
execution is clearer, so we could simply not introduce this. However,
having this state will make both the codepaths clearer (offline
vs. temporary failure) and the operational model (it’s not a node with
errors, but an offline node).</p>
</div>
<div class="section" id="drained-flag">
<h5><em>drained</em> flag<a class="headerlink" href="#drained-flag" title="Permalink to this headline"></a></h5>
<p>Due to parallel execution of jobs in Ganeti 2.0, we could have the
following situation:</p>
<ul class="simple">
<li>gnt-node migrate + failover is run</li>
<li>gnt-node evacuate is run, which schedules a long-running 6-opcode
job for the node</li>
<li>partway through, a new job comes in that runs an iallocator script,
which finds the above node as empty and a very good candidate</li>
<li>gnt-node evacuate has finished, but now it has to be run again, to
clean the above instance(s)</li>
</ul>
<p>In order to prevent this situation, and to be able to get nodes into
proper offline status easily, a new <em>drained</em> flag was added to the
nodes.</p>
<p>This flag (which actually means “is being, or was, drained, and is
expected to go offline”) will prevent allocations on the node, but
otherwise all other operations (start/stop instance, query, etc.) will
keep working without any restrictions.</p>
</div>
<div class="section" id="interaction-between-flags">
<h5>Interaction between flags<a class="headerlink" href="#interaction-between-flags" title="Permalink to this headline"></a></h5>
<p>While these flags are implemented as separate flags, they are
mutually exclusive and act together with the master node role
as a single <em>node status</em> value. In other words, a node is in only one
of these roles at a given time. The lack of any of these flags denotes
a regular node.</p>
<p>The current node status is visible in the <code class="docutils literal"><span class="pre">gnt-cluster</span> <span class="pre">verify</span></code>
output, and the individual flags can be examined via separate fields in
the <code class="docutils literal"><span class="pre">gnt-node</span> <span class="pre">list</span></code> output.</p>
<p>These new flags will be exported in both the iallocator input message
and via RAPI, see the respective man pages for the exact names.</p>
</div>
</div>
</div>
<div class="section" id="feature-changes">
<h3><a class="toc-backref" href="#id18">Feature changes</a><a class="headerlink" href="#feature-changes" title="Permalink to this headline"></a></h3>
<p>The main feature-level changes will be:</p>
<ul class="simple">
<li>a number of disk related changes</li>
<li>removal of fixed two-disk, one-nic per instance limitation</li>
</ul>
<div class="section" id="disk-handling-changes">
<h4>Disk handling changes<a class="headerlink" href="#disk-handling-changes" title="Permalink to this headline"></a></h4>
<p>The storage options available in Ganeti 1.x were introduced based on
then-current software (first DRBD 0.7, then later DRBD 8) and the
estimated usage patterns. However, experience has later shown that some
assumptions made initially are not true and that more flexibility is
needed.</p>
<p>One main assumption made was that disk failures should be treated as
‘rare’ events, and that each of them needs to be manually handled in
order to ensure data safety; however, both of these assumptions proved
false:</p>
<ul class="simple">
<li>disk failures can be a common occurrence, based on usage patterns or
cluster size</li>
<li>our disk setup is robust enough (referring to DRBD8 + LVM) that we
could automate more of the recovery</li>
</ul>
<p>Note that we still don’t have fully-automated disk recovery as a goal,
but our goal is to reduce the manual work needed.</p>
<p>As such, we plan the following main changes:</p>
<ul class="simple">
<li>DRBD8 is much more flexible and stable than its previous version
(0.7), such that removing the support for the <code class="docutils literal"><span class="pre">remote_raid1</span></code>
template and focusing only on DRBD8 is easier</li>
<li>dynamic discovery of DRBD devices is not actually needed in a cluster
where the DRBD namespace is controlled by Ganeti; switching to a
static assignment (done at either instance creation time or change
secondary time) will change the disk activation time from O(n) to
O(1), which on big clusters is a significant gain</li>
<li>remove the hard dependency on LVM (currently all available storage
types are ultimately backed by LVM volumes) by introducing file-based
storage</li>
</ul>
<p>Additionally, a number of smaller enhancements are also planned:</p>
<ul class="simple">
<li>support a variable number of disks</li>
<li>support read-only disks</li>
</ul>
<p>Future enhancements in the 2.x series, which do not require base design
changes, might include:</p>
<ul class="simple">
<li>enhancement of the LVM allocation method in order to try to keep
all of an instance’s virtual disks on the same physical
disks</li>
<li>add support for DRBD8 authentication at handshake time in
order to ensure each device connects to the correct peer</li>
<li>remove the restrictions on failover only to the secondary
which creates very strict rules on cluster allocation</li>
</ul>
<div class="section" id="drbd-minor-allocation">
<h5>DRBD minor allocation<a class="headerlink" href="#drbd-minor-allocation" title="Permalink to this headline"></a></h5>
<p>Currently, when trying to identify or activate a new DRBD (or MD)
device, the code scans all in-use devices in order to see if we find
one that looks similar to our parameters and is already in the desired
state or not. Since this needs external commands to be run, it is very
slow when more than a few devices are already present.</p>
<p>Therefore, we will change the discovery model from dynamic to
static. When a new device is logically created (added to the
configuration) a free minor number is computed from the list of
devices that should exist on that node and assigned to that
device.</p>
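<p>The minor computation itself is simple; a sketch, assuming the
configuration can enumerate the minors already assigned on a node:</p>
<div class="highlight-default"><div class="highlight"><pre>def find_free_minor(used_minors):
    """Return the first DRBD minor not present in the configuration."""
    used = set(used_minors)
    minor = 0
    while minor in used:
        minor += 1
    return minor

# with minors 0, 1 and 3 assigned, the next device gets minor 2
assert find_free_minor([0, 1, 3]) == 2
</pre></div>
</div>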
<p>At device activation, if the minor is already in use, we check whether
it has our parameters; if not, we just destroy the device (if
possible, otherwise we abort) and start it with our own
parameters.</p>
<p>This means that we in effect take ownership of the minor space for
that device type; if there’s a user-created DRBD minor, it will be
automatically removed.</p>
<p>The change will have the effect of reducing the number of external
commands run per device from a constant number times the index of the
first free DRBD minor to just a constant number.</p>
</div>
<div class="section" id="removal-of-obsolete-device-types-md-drbd7">
<h5>Removal of obsolete device types (MD, DRBD7)<a class="headerlink" href="#removal-of-obsolete-device-types-md-drbd7" title="Permalink to this headline"></a></h5>
<p>We need to remove these device types because of two issues. First,
DRBD7 has bad failure modes in case of dual failures (both network and
disk): it cannot propagate the error up the device stack and instead
just panics. Second, due to the asymmetry between primary and
secondary in MD+DRBD mode, we cannot do live failover (not even if we
had MD+DRBD8).</p>
</div>
<div class="section" id="file-based-storage-support">
<h5>File-based storage support<a class="headerlink" href="#file-based-storage-support" title="Permalink to this headline"></a></h5>
<p>Using files instead of logical volumes for instance storage would
allow us to get rid of the hard requirement for volume groups on
testing clusters, and would also allow the use of SAN storage for
live failover, taking advantage of this storage solution.</p>
</div>
<div class="section" id="better-lvm-allocation">
<h5>Better LVM allocation<a class="headerlink" href="#better-lvm-allocation" title="Permalink to this headline"></a></h5>
<p>Currently, the LV to PV allocation mechanism is a very simple one: at
each new request for a logical volume, LVM is told to allocate the
volume based on the amount of free space. This is good for
simplicity and for keeping the usage equally spread over the available
physical disks; however, it introduces the problem that an instance
could end up with its (currently) two drives on two physical disks, or
(worse) that the data and metadata for a DRBD device end up on
different drives.</p>
<p>This is bad because it causes unneeded <code class="docutils literal"><span class="pre">replace-disks</span></code> operations in
case of a physical failure.</p>
<p>The solution is to batch allocations for an instance and make the LVM
handling code try to allocate as close as possible all the storage of
one instance. We will still allow the logical volumes to spill over to
additional disks as needed.</p>
<p>Note that this clustered allocation can only be attempted at initial
instance creation, or at change secondary node time. At add disk time,
or at replacing individual disks, it’s not easy enough to compute the
current disk map so we’ll not attempt the clustering.</p>
</div>
<div class="section" id="drbd8-peer-authentication-at-handshake">
<h5>DRBD8 peer authentication at handshake<a class="headerlink" href="#drbd8-peer-authentication-at-handshake" title="Permalink to this headline"></a></h5>
<p>DRBD8 has a new feature that allows authentication of the peer at
connect time. We can use this to prevent connecting to the wrong peer,
more than to secure the connection. Even though we have never had
issues with wrong connections, it would be good to implement this.</p>
</div>
<div class="section" id="lvm-self-repair-optional">
<h5>LVM self-repair (optional)<a class="headerlink" href="#lvm-self-repair-optional" title="Permalink to this headline"></a></h5>
<p>The complete failure of a physical disk is very tedious to
troubleshoot, mainly because of the many failure modes and the many
steps needed. We can safely automate some of the steps, more
specifically the <code class="docutils literal"><span class="pre">vgreduce</span> <span class="pre">--removemissing</span></code> operation, using the
following method (a sketch in code follows the list):</p>
<ol class="arabic simple">
<li>check if all nodes have consistent volume groups</li>
<li>if yes, and previous status was yes, do nothing</li>
<li>if yes, and previous status was no, save status and restart</li>
<li>if no, and previous status was no, do nothing</li>
<li><dl class="first docutils">
<dt>if no, and previous status was yes:</dt>
<dd><ol class="first last arabic">
<li>if more than one node is inconsistent, do nothing</li>
<li><dl class="first docutils">
<dt>if only one node is inconsistent:</dt>
<dd><ol class="first last arabic">
<li>run <code class="docutils literal"><span class="pre">vgreduce</span> <span class="pre">--removemissing</span></code></li>
<li>log this occurrence in the Ganeti log in a form that
can be used for monitoring</li>
<li>[FUTURE] run <code class="docutils literal"><span class="pre">replace-disks</span></code> for all
instances affected</li>
</ol>
</dd>
</dl>
</li>
</ol>
</dd>
</dl>
</li>
</ol>
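<p>A sketch of this decision logic, where <code class="docutils literal"><span class="pre">inconsistent_nodes</span></code> is the
result of the current check, <code class="docutils literal"><span class="pre">prev_ok</span></code> the saved result of the
previous run, and <code class="docutils literal"><span class="pre">run_on_node</span></code> an assumed helper that executes a
command remotely:</p>
<div class="highlight-default"><div class="highlight"><pre>import logging

def lvm_self_repair(inconsistent_nodes, prev_ok, vg_name, run_on_node):
    now_ok = not inconsistent_nodes
    if now_ok or not prev_ok:
        # cases 1-4: everything is consistent now, or it was already
        # inconsistent on the previous run; in both situations do nothing
        return now_ok
    # the status changed from consistent to inconsistent (case 5)
    if len(inconsistent_nodes) &gt; 1:
        return now_ok  # more than one node affected: do nothing
    node = inconsistent_nodes[0]
    run_on_node(node, ["vgreduce", "--removemissing", vg_name])
    # log in a form that can be used for monitoring
    logging.warning("vgreduce --removemissing run on node %s", node)
    return now_ok  # the caller saves this as the new previous status
</pre></div>
</div>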
</div>
<div class="section" id="failover-to-any-node">
<h5>Failover to any node<a class="headerlink" href="#failover-to-any-node" title="Permalink to this headline"></a></h5>
<p>With a modified disk activation sequence, we can implement the
<em>failover to any</em> functionality, removing many of the layout
restrictions of a cluster:</p>
<ul class="simple">
<li>the need to reserve memory on the current secondary: this is reduced
to the need to reserve memory anywhere on the cluster</li>
<li>the need to first fail over and then replace the secondary for an
instance: with failover-to-any, we can directly fail over to
another node, which also does the disk replacement in the same
step</li>
</ul>
<p>In the following, we denote the current primary by P1, the current
secondary by S1, and the new primary and secondaries by P2 and S2. P2
is fixed to the node the user chooses, but the choice of S2 can be
made between P1 and S1. This choice can be constrained, depending on
which of P1 and S1 has failed.</p>
<ul class="simple">
<li>if P1 has failed, then S1 must become S2, and live migration is not
possible</li>
<li>if S1 has failed, then P1 must become S2, and live migration could be
possible (in theory, but this is not a design goal for 2.0)</li>
</ul>
<p>The algorithm for performing the failover is straightforward (a sketch
in code follows the list):</p>
<ul class="simple">
<li>verify that S2 (the node the user has chosen to keep as secondary) has
valid data (is consistent)</li>
<li>tear down the current DRBD association and setup a DRBD pairing
between P2 (P2 is indicated by the user) and S2; since P2 has no data,
it will start re-syncing from S2</li>
<li>as soon as P2 is in state SyncTarget (i.e. after the resync has
started but before it has finished), we can promote it to primary role
(r/w) and start the instance on P2</li>
<li>as soon as the P2-S2 sync has finished, we can remove
the old data on the old node that has not been chosen for
S2</li>
</ul>
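<p>In code form the sequence might look like this; the <code class="docutils literal"><span class="pre">bdev</span></code> and
<code class="docutils literal"><span class="pre">hv</span></code> objects stand in for the block device and hypervisor layers and
are pure assumptions of this sketch:</p>
<div class="highlight-default"><div class="highlight"><pre>def failover_to_any(bdev, hv, instance, p2, s2):
    # S2 (the node kept as secondary) must have consistent data
    if not bdev.is_consistent(instance, s2):
        raise RuntimeError("node %s has inconsistent data" % s2)
    # tear down the old DRBD association and pair P2 with S2;
    # P2 has no data and starts re-syncing from S2
    bdev.teardown(instance)
    bdev.setup_pair(instance, primary=p2, secondary=s2)
    # once P2 reaches SyncTarget the resync has started and we can
    # already promote P2 to primary and start the instance there
    bdev.wait_for_state(instance, p2, "SyncTarget")
    bdev.promote(instance, p2)
    hv.start_instance(instance, p2)
    # when the P2-S2 sync is done, drop the stale data on the node
    # that was not kept as S2
    bdev.wait_for_sync(instance)
    bdev.remove_old_storage(instance, keep=[p2, s2])
</pre></div>
</div>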
<p>Caveats: during the P2-S2 sync, a (non-transient) network error
will cause I/O errors on the instance, so (if a longer instance
downtime is acceptable) we can postpone the restart of the instance
until the resync is done. However, disk I/O errors on S2 will cause
data loss, since we don’t have a good copy of the data anymore, so in
this case waiting for the sync to complete is not an option. As such,
it is recommended that this feature is used only in conjunction with
proper disk monitoring.</p>
<p>Live migration note: While failover-to-any is possible for all choices
of S2, migration-to-any is possible only if we keep P1 as S2.</p>
</div>
<div class="section" id="id6">
<h5>Caveats<a class="headerlink" href="#id6" title="Permalink to this headline"></a></h5>
<p>The dynamic device model, while more complex, has an advantage: it
will not mistakenly reuse the DRBD device of another instance, since
it always looks for either our own or a free one.</p>
<p>The static one, in contrast, will assume that given a minor number N,
it’s ours and we can take over. This needs careful implementation, such
that if the minor is in use, either we are able to cleanly shut it
down, or we abort the startup. Otherwise, we could end up syncing
between two instances’ disks, causing data loss.</p>
</div>
</div>
<div class="section" id="variable-number-of-disk-nics-per-instance">
<h4>Variable number of disk/NICs per instance<a class="headerlink" href="#variable-number-of-disk-nics-per-instance" title="Permalink to this headline"></a></h4>
<div class="section" id="variable-number-of-disks">
<h5>Variable number of disks<a class="headerlink" href="#variable-number-of-disks" title="Permalink to this headline"></a></h5>
<p>In order to support high-security scenarios (for example read-only sda
and read-write sdb), we need a fully flexible disk
definition. This has less impact than it might seem at first sight:
only the instance creation has a hard-coded number of disks, not the
disk handling code. The block device handling and most of the instance
handling code already work with “the instance’s disks” as
opposed to “the two disks of the instance”, but some pieces do not
(e.g. import/export) and the code needs a review to ensure safety.</p>
<p>The objective is to be able to specify the number of disks at
instance creation, and to be able to toggle from read-only to
read-write a disk afterward.</p>
</div>
<div class="section" id="variable-number-of-nics">
<h5>Variable number of NICs<a class="headerlink" href="#variable-number-of-nics" title="Permalink to this headline"></a></h5>
<p>Similar to the disk change, we need to allow multiple network
interfaces per instance. This will affect the internal code (some
functions will have to stop assuming that <code class="docutils literal"><span class="pre">instance.nics</span></code> is a list
of length one), the OS API, which currently can export/import only one
interface per instance, and the command line interface.</p>
</div>
</div>
</div>
<div class="section" id="interface-changes">
<h3><a class="toc-backref" href="#id19">Interface changes</a><a class="headerlink" href="#interface-changes" title="Permalink to this headline"></a></h3>
<p>There are two areas of interface changes: API-level changes (the OS
interface and the RAPI interface) and the command line interface
changes.</p>
<div class="section" id="os-interface">
<h4>OS interface<a class="headerlink" href="#os-interface" title="Permalink to this headline"></a></h4>
<p>The current Ganeti OS interface, version 5, is tailored for Ganeti 1.2.
The interface is composed of a series of scripts which get called with
certain parameters to perform OS-dependent operations on the cluster.
The current scripts are:</p>
<dl class="docutils">
<dt>create</dt>
<dd>called when a new instance is added to the cluster</dd>
<dt>export</dt>
<dd>called to export an instance disk to a stream</dd>
<dt>import</dt>
<dd>called to import from a stream to a new instance</dd>
<dt>rename</dt>
<dd>called to perform the os-specific operations necessary for renaming an
instance</dd>
</dl>
<p>Currently these scripts suffer from the limitations of Ganeti 1.2: for
example, they accept exactly one block and one swap device to operate
on, rather than any number of generic block devices; they blindly assume
that an instance will have just one network interface to operate on; and
they cannot be configured to optimise the instance for a particular
hypervisor.</p>
<p>Since in Ganeti 2.0 we want to support multiple hypervisors and a
non-fixed number of networks and disks, the OS interface needs to change
to transmit the appropriate amount of information about an instance to
its managing operating system when operating on it. Moreover, since some
old assumptions usually made in OS scripts are no longer valid, we need
to re-establish a common understanding of what can and cannot be assumed
regarding the Ganeti environment.</p>
<p>When designing the new OS API our priorities are:</p>
<ul class="simple">
<li>ease of use</li>
<li>future extensibility</li>
<li>ease of porting from the old API</li>
<li>modularity</li>
</ul>
<p>As such we want to limit the number of scripts that must be written to
support an OS, and make it easy to share code between them by making
their input uniform.  We will also leave the current script structure
unchanged, as far as we can, and make a few of the scripts (import,
export and rename) optional. Most information will be passed to the
scripts through environment variables, for ease of access and, at the
same time, ease of using only the information a script needs.</p>
<div class="section" id="the-scripts">
<h5>The Scripts<a class="headerlink" href="#the-scripts" title="Permalink to this headline"></a></h5>
<p>As in Ganeti 1.2, every OS which wants to be installed in Ganeti needs
to support the following functionality, through scripts:</p>
<dl class="docutils">
<dt>create:</dt>
<dd>used to create a new instance running that OS. This script should
prepare the block devices, and install them so that the new OS can
boot under the specified hypervisor.</dd>
<dt>export (optional):</dt>
<dd>used to export an installed instance using the given OS to a format
which can be used to import it back into a new instance.</dd>
<dt>import (optional):</dt>
<dd>used to import an exported instance into a new one. This script is
similar to create, but the new instance should have the content of the
export, rather than contain a pristine installation.</dd>
<dt>rename (optional):</dt>
<dd>used to perform the internal OS-specific operations needed to rename
an instance.</dd>
</dl>
<p>If any optional script is not implemented, Ganeti will refuse to perform
the given operation on instances using the non-implementing OS. Of
course the create script is mandatory, and it doesn’t make sense to
support either the export or the import operation but not both.</p>
<div class="section" id="incompatibilities-with-1-2">
<h6>Incompatibilities with 1.2<a class="headerlink" href="#incompatibilities-with-1-2" title="Permalink to this headline"></a></h6>
<p>We expect the following incompatibilities between the OS scripts for 1.2
and the ones for 2.0:</p>
<ul class="simple">
<li>Input parameters: in 1.2 those were passed on the command line, in 2.0
we’ll use environment variables, as there will be a lot more
information and not all OSes may care about all of it.</li>
<li>Number of calls: export scripts will be called once for each device
the instance has, and import scripts once for every exported disk.
Imported instances will be forced to have a number of disks greater
than or equal to that of the export.</li>
<li>Some scripts are not compulsory: if such a script is missing the
relevant operations will be forbidden for instances of that OS. This
makes it easier to distinguish between unsupported operations and
no-op ones (if any).</li>
</ul>
</div>
<div class="section" id="input">
<h6>Input<a class="headerlink" href="#input" title="Permalink to this headline"></a></h6>
<p>Rather than using command line flags, as they do now, scripts will
accept inputs from environment variables. We expect the following input
values:</p>
<dl class="docutils">
<dt>OS_API_VERSION</dt>
<dd>The version of the OS API that the following parameters comply with;
this is used so that in the future we could have OSes supporting
multiple versions, and thus Ganeti can send the proper version in this
parameter</dd>
<dt>INSTANCE_NAME</dt>
<dd>Name of the instance acted on</dd>
<dt>HYPERVISOR</dt>
<dd>The hypervisor the instance should run on (e.g. ‘xen-pvm’, ‘xen-hvm’,
‘kvm’)</dd>
<dt>DISK_COUNT</dt>
<dd>The number of disks this instance will have</dd>
<dt>NIC_COUNT</dt>
<dd>The number of NICs this instance will have</dd>
<dt>DISK_&lt;N&gt;_PATH</dt>
<dd>Path to the Nth disk.</dd>
<dt>DISK_&lt;N&gt;_ACCESS</dt>
<dd>W if read/write, R if read only. OS scripts are not supposed to touch
read-only disks, but they are passed to the scripts so they know about
them.</dd>
<dt>DISK_&lt;N&gt;_FRONTEND_TYPE</dt>
<dd>Type of the disk as seen by the instance. Can be ‘scsi’, ‘ide’,
‘virtio’</dd>
<dt>DISK_&lt;N&gt;_BACKEND_TYPE</dt>
<dd>Type of the disk as seen from the node. Can be ‘block’, ‘file:loop’ or
‘file:blktap’</dd>
<dt>NIC_&lt;N&gt;_MAC</dt>
<dd>MAC address for the Nth network interface</dd>
<dt>NIC_&lt;N&gt;_IP</dt>
<dd>IP address for the Nth network interface, if available</dd>
<dt>NIC_&lt;N&gt;_BRIDGE</dt>
<dd>Node bridge the Nth network interface will be connected to</dd>
<dt>NIC_&lt;N&gt;_FRONTEND_TYPE</dt>
<dd>Type of the Nth NIC as seen by the instance. For example ‘virtio’,
‘rtl8139’, etc.</dd>
<dt>DEBUG_LEVEL</dt>
<dd>Whether more output should be produced, for debugging purposes.
Currently the only valid values are 0 and 1.</dd>
</dl>
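<p>As an illustration, a create script written in Python might gather its
disk information from these variables as follows (a sketch only; the
actual installation steps are omitted):</p>
<div class="highlight-default"><div class="highlight"><pre>import os
import sys

def get_disks():
    disks = []
    for n in range(int(os.environ["DISK_COUNT"])):
        disks.append({
            "path": os.environ["DISK_%d_PATH" % n],
            # "W" for read/write, "R" for read-only; scripts must not
            # touch the read-only ones
            "access": os.environ["DISK_%d_ACCESS" % n],
        })
    return disks

if int(os.environ.get("DEBUG_LEVEL", "0")):
    # user-targeted output goes to stderr (see Output/Behaviour below)
    sys.stderr.write("creating %s with disks %r\n" %
                     (os.environ["INSTANCE_NAME"], get_disks()))
</pre></div>
</div>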
<p>These are only the basic variables we are thinking of now, but more
may come during the implementation and they will be documented in the
<em class="manpage">ganeti-os-interface(7)</em> man page. All these variables will be
available to all scripts.</p>
<p>Some scripts will need a bit more information to work. These will have
per-script variables, such as for example:</p>
<dl class="docutils">
<dt>OLD_INSTANCE_NAME</dt>
<dd>rename: the name the instance should be renamed from.</dd>
<dt>EXPORT_DEVICE</dt>
<dd>export: device to be exported, a snapshot of the actual device. The
data must be exported to stdout.</dd>
<dt>EXPORT_INDEX</dt>
<dd>export: sequential number of the instance device targeted.</dd>
<dt>IMPORT_DEVICE</dt>
<dd>import: device to send the data to, part of the new instance. The data
must be imported from stdin.</dd>
<dt>IMPORT_INDEX</dt>
<dd>import: sequential number of the instance device targeted.</dd>
</dl>
<p>(Rationale for INSTANCE_NAME as an environment variable: the instance
name is always needed and we could pass it on the command line. On the
other hand, though, this would force scripts to both access the
environment and parse the command line, so we’ll move it for
uniformity.)</p>
</div>
<div class="section" id="output-behaviour">
<h6>Output/Behaviour<a class="headerlink" href="#output-behaviour" title="Permalink to this headline"></a></h6>
<p>As discussed, scripts should only send user-targeted information to
stderr. The create and import scripts are supposed to format/initialise
the given block devices and install the correct instance data. The
export script is supposed to export instance data to stdout in a format
understandable by the import script. The data will be compressed by
Ganeti, so no compression should be done. The rename script should only
modify the instance’s knowledge of what its name is.</p>
</div>
</div>
<div class="section" id="other-declarative-style-features">
<h5>Other declarative style features<a class="headerlink" href="#other-declarative-style-features" title="Permalink to this headline"></a></h5>
<p>Similar to Ganeti 1.2, OS specifications will need to provide a
‘ganeti_api_version’ file containing a list of numbers matching the
version(s) of the API they implement. Ganeti itself will always be
compatible with one version of the API and may maintain backwards
compatibility if it’s feasible to do so. The numbers are one-per-line,
so an OS supporting both version 5 and version 20 will have a file
containing two lines. This is different from Ganeti 1.2, which only
supported one version number.</p>
<p>In addition to that an OS will be able to declare that it does support
only a subset of the Ganeti hypervisors, by declaring them in the
‘hypervisors’ file.</p>
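<p>For illustration, reading these declarations could be as simple as the
following (the OS directory path is an assumption of this example):</p>
<div class="highlight-default"><div class="highlight"><pre>os_dir = "/srv/ganeti/os/example-os"  # assumed OS installation directory

with open(os_dir + "/ganeti_api_version") as fd:
    api_versions = [int(line) for line in fd if line.strip()]
# an OS supporting both version 5 and version 20 yields [5, 20]
</pre></div>
</div>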
</div>
<div class="section" id="caveats-notes">
<h5>Caveats/Notes<a class="headerlink" href="#caveats-notes" title="Permalink to this headline"></a></h5>
<p>We might want to have a “default” import/export behaviour that just
dumps all disks and restores them. This can save work as most systems
will just do this, while allowing flexibility for different systems.</p>
<p>Environment variables are limited in size, but we expect that there will
be enough space to store the information we need. If we discover that
this is not the case, we may want to move to a more complex API, such as
storing this information on the filesystem and providing the OS script
with the path to a file where it is encoded in some format.</p>
</div>
</div>
<div class="section" id="remote-api-changes">
<h4>Remote API changes<a class="headerlink" href="#remote-api-changes" title="Permalink to this headline"></a></h4>
<p>The first Ganeti remote API (RAPI) was designed and deployed with the
Ganeti 1.2.5 release.  That version provided read-only access to the
cluster state. A fully functional read-write API demands significant
internal changes, which will be implemented in version 2.0.</p>
<p>We decided to implement the Ganeti RAPI in a RESTful way, which is
aligned with the key features we are looking for: it is a simple,
stateless, scalable and extensible paradigm for API implementation. As
transport it uses HTTP over SSL, and we are implementing it with JSON
encoding, but in a way that makes it possible to extend it and provide
any other encoding.</p>
<div class="section" id="design">
<h5>Design<a class="headerlink" href="#design" title="Permalink to this headline"></a></h5>
<p>The Ganeti RAPI is implemented as an independent daemon, running on the
same node and with the same permission level as the Ganeti master
daemon. Communication is done through the LUXI library to the master
daemon. In order to keep communication asynchronous, RAPI processes two
types of client requests:</p>
<ul class="simple">
<li>queries: server is able to answer immediately</li>
<li>job submission: some time is required for a useful response</li>
</ul>
<p>In the query case, the requested data is sent back to the client in the
HTTP response body. Typical examples of queries would be: list of nodes,
instances, cluster info, etc.</p>
<p>In the case of job submission, the client receives a job ID, the
identifier which allows one to query the job progress in the job queue
(see <a class="reference internal" href="#job-queue">Job Queue</a>).</p>
<p>Internally, each exported object has a version identifier, which is
used as a state identifier in the HTTP ETag header field for
requests/responses, to avoid race conditions.</p>
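<p>For illustration, a client could use this version identifier in a
standard HTTP conditional request; the host, port and payload below are
assumptions of this example:</p>
<div class="highlight-default"><div class="highlight"><pre>import urllib.request

base = "https://cluster.example.com:5080"   # assumed RAPI endpoint

resp = urllib.request.urlopen(base + "/mycluster/instances/web1")
etag = resp.headers.get("ETag")             # the object version identifier

req = urllib.request.Request(base + "/mycluster/instances/web1",
                             data=b'{"memory": 1024}', method="PUT")
req.add_header("If-Match", etag)  # rejected if the version changed meanwhile
urllib.request.urlopen(req)
</pre></div>
</div>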
</div>
<div class="section" id="resource-representation">
<h5>Resource representation<a class="headerlink" href="#resource-representation" title="Permalink to this headline"></a></h5>
<p>The key difference of using REST instead of other API styles is that
REST requires separation of services via resources with unique URIs.
Each of them should have a limited amount of state and support the
standard HTTP methods: GET, POST, DELETE, PUT.</p>
<p>For example in Ganeti’s case we can have a set of URIs:</p>
<blockquote>
<div><ul class="simple">
<li><code class="docutils literal"><span class="pre">/{clustername}/instances</span></code></li>
<li><code class="docutils literal"><span class="pre">/{clustername}/instances/{instancename}</span></code></li>
<li><code class="docutils literal"><span class="pre">/{clustername}/instances/{instancename}/tag</span></code></li>
<li><code class="docutils literal"><span class="pre">/{clustername}/tag</span></code></li>
</ul>
</div></blockquote>
<p>A GET request to <code class="docutils literal"><span class="pre">/{clustername}/instances</span></code> will return the list of
instances, a POST to <code class="docutils literal"><span class="pre">/{clustername}/instances</span></code> should create a new
instance, a DELETE of <code class="docutils literal"><span class="pre">/{clustername}/instances/{instancename}</span></code> should
delete the instance, and a GET of <code class="docutils literal"><span class="pre">/{clustername}/tag</span></code> should return the
cluster tags.</p>
<p>Each resource URI will have a version prefix. The resource IDs are to
be determined.</p>
<p>Internal encoding might be JSON, XML, or any other. The JSON encoding
fits the Ganeti RAPI needs nicely. The client can request a specific
representation via the Accept field in the HTTP header.</p>
<p>REST uses HTTP as its transport and application protocol for resource
access. The set of possible responses is a subset of standard HTTP
responses.</p>
<p>The statelessness model provides additional reliability and
transparency to operations (e.g. only one request needs to be analyzed
to understand the in-progress operation, not a sequence of multiple
requests/responses).</p>
</div>
<div class="section" id="id7">
<h5>Security<a class="headerlink" href="#id7" title="Permalink to this headline"></a></h5>
<p>With the write functionality, security becomes a much bigger issue.
The Ganeti RAPI uses basic HTTP authentication on top of an
SSL-secured connection to grant access to an exported resource. The
password is stored locally in an Apache-style <code class="docutils literal"><span class="pre">.htpasswd</span></code> file. Only
one level of privileges is supported.</p>
</div>
<div class="section" id="id8">
<h5>Caveats<a class="headerlink" href="#id8" title="Permalink to this headline"></a></h5>
<p>The model detailed above for job submission requires the client to
poll periodically for updates to the job; an alternative would be to
allow the client to request a callback, or a ‘wait for updates’ call.</p>
<p>The callback model was not considered due to the following two issues:</p>
<ul class="simple">
<li>callbacks would require a new model of allowed callback URLs,
together with a method of managing these</li>
<li>callbacks only work when the client and the master are in the same
security domain, and they fail in the other cases (e.g. when there is
a firewall between the client and the RAPI daemon that only allows
client-to-RAPI calls, which is usual in DMZ cases)</li>
</ul>
<p>The ‘wait for updates’ method is not suited to the HTTP protocol,
where requests are supposed to be short-lived.</p>
</div>
</div>
<div class="section" id="command-line-changes">
<h4>Command line changes<a class="headerlink" href="#command-line-changes" title="Permalink to this headline"></a></h4>
<p>Ganeti 2.0 introduces several new features as well as new ways to
handle instance resources like disks or network interfaces. This
requires some noticeable changes in the way command line arguments are
handled.</p>
<ul class="simple">
<li>extend and modify command line syntax to support new features</li>
<li>ensure consistent patterns in command line arguments to reduce
cognitive load</li>
</ul>
<p>The design changes that require these changes are, in no particular
order:</p>
<ul class="simple">
<li>flexible instance disk handling: support a variable number of disks
with varying properties per instance,</li>
<li>flexible instance network interface handling: support a variable
number of network interfaces with varying properties per instance</li>
<li>multiple hypervisors: multiple hypervisors can be active on the same
cluster, each supporting different parameters,</li>
<li>support for device type CDROM (via ISO image)</li>
</ul>
<p>As such, there are several areas of Ganeti where the command line
arguments will change:</p>
<ul class="simple">
<li>Cluster configuration<ul>
<li>cluster initialization</li>
<li>cluster default configuration</li>
</ul>
</li>
<li>Instance configuration<ul>
<li>handling of network cards for instances,</li>
<li>handling of disks for instances,</li>
<li>handling of CDROM devices and</li>
<li>handling of hypervisor specific options.</li>
</ul>
</li>
</ul>
<div class="section" id="notes-about-device-removal-addition">
<h5>Notes about device removal/addition<a class="headerlink" href="#notes-about-device-removal-addition" title="Permalink to this headline"></a></h5>
<p>To avoid problems with device location changes (e.g. second network
interface of the instance becoming the first or third and the like)
the list of network/disk devices is treated as a stack, i.e. devices
can only be added/removed at the end of the list of devices of each
class (disk or network) for each instance.</p>
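<p>In other words, each device list behaves like a stack that is only ever
appended to or popped from (a purely illustrative sketch):</p>
<div class="highlight-default"><div class="highlight"><pre>def add_device(devices, new_dev):
    devices.append(new_dev)  # "add": the new device is always last

def remove_device(devices):
    devices.pop()            # "remove": only the last device can go
</pre></div>
</div>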
</div>
<div class="section" id="gnt-instance-commands">
<h5>gnt-instance commands<a class="headerlink" href="#gnt-instance-commands" title="Permalink to this headline"></a></h5>
<p>The commands for gnt-instance will be modified and extended to allow
for the new functionality:</p>
<ul class="simple">
<li>the add command will be extended to support the new device and
hypervisor options,</li>
<li>the modify command continues to handle all modifications to
instances, but will be extended with new arguments for handling
devices.</li>
</ul>
</div>
<div class="section" id="network-device-options">
<h5>Network Device Options<a class="headerlink" href="#network-device-options" title="Permalink to this headline"></a></h5>
<p>The generic format of the network device option is:</p>
<blockquote>
<div>--net $DEVNUM[:$OPTION=$VALUE][,$OPTION=$VALUE]</div></blockquote>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">$DEVNUM:</th><td class="field-body">device number, unsigned integer, starting at 0,</td>
</tr>
<tr class="field-even field"><th class="field-name">$OPTION:</th><td class="field-body">device option, string,</td>
</tr>
<tr class="field-odd field"><th class="field-name">$VALUE:</th><td class="field-body">device option value, string.</td>
</tr>
</tbody>
</table>
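<p>A sketch of how such an option value could be parsed (illustrative
only, not the actual option-parsing code):</p>
<div class="highlight-default"><div class="highlight"><pre>def parse_device_option(value):
    """Split "$DEVNUM[:$OPTION=$VALUE][,$OPTION=$VALUE]" into parts."""
    if ":" in value:
        devnum, rest = value.split(":", 1)
        options = dict(kv.split("=", 1) for kv in rest.split(","))
    else:
        devnum, options = value, {}
    return devnum, options

# parse_device_option("0:mac=auto,bridge=br0")
#   == ("0", {"mac": "auto", "bridge": "br0"})
</pre></div>
</div>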
<p>Currently, the following device options will be defined (open to
further changes):</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">mac:</th><td class="field-body">MAC address of the network interface, accepts either a valid
MAC address or the string ‘auto’. If ‘auto’ is specified, a new MAC
address will be generated randomly. If the mac device option is not
specified, the default value ‘auto’ is assumed.</td>
</tr>
<tr class="field-even field"><th class="field-name">bridge:</th><td class="field-body">network bridge the network interface is connected
to. Accepts either a valid bridge name (the specified bridge must
exist on the node(s)) as string or the string ‘auto’. If ‘auto’ is
specified, the default brigde is used. If the bridge option is not
specified, the default value ‘auto’ is assumed.</td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="disk-device-options">
<h5>Disk Device Options<a class="headerlink" href="#disk-device-options" title="Permalink to this headline"></a></h5>
<p>The generic format of the disk device option is:</p>
<blockquote>
<div>--disk $DEVNUM[:$OPTION=$VALUE][,$OPTION=$VALUE]</div></blockquote>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">$DEVNUM:</th><td class="field-body">device number, unsigned integer, starting at 0,</td>
</tr>
<tr class="field-even field"><th class="field-name">$OPTION:</th><td class="field-body">device option, string,</td>
</tr>
<tr class="field-odd field"><th class="field-name">$VALUE:</th><td class="field-body">device option value, string.</td>
</tr>
</tbody>
</table>
<p>Currently, the following device options will be defined (open to
further changes):</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">size:</th><td class="field-body"><p class="first">size of the disk device, either a positive number, specifying
the disk size in mebibytes, or a number followed by a magnitude suffix
(M for mebibytes, G for gibibytes). Also accepts the string ‘auto’ in
which case the default disk size will be used. If the size option is
not specified, ‘auto’ is assumed. This option is not valid for all
disk layout types.</p>
</td>
</tr>
<tr class="field-even field"><th class="field-name">access:</th><td class="field-body"><p class="first">access mode of the disk device, a single letter, valid values
are:</p>
<ul class="simple">
<li><em>w</em>: read/write access to the disk device or</li>
<li><em>r</em>: read-only access to the disk device.</li>
</ul>
<p>If the access mode is not specified, the default mode of read/write
access will be configured.</p>
</td>
</tr>
<tr class="field-odd field"><th class="field-name">path:</th><td class="field-body"><p class="first last">path to the image file for the disk device, string. No default
exists. This option is not valid for all disk layout types.</p>
</td>
</tr>
</tbody>
</table>
</div>
<div class="section" id="adding-devices">
<h5>Adding devices<a class="headerlink" href="#adding-devices" title="Permalink to this headline"></a></h5>
<p>To add devices to an already existing instance, use the device type
specific option to gnt-instance modify. Currently, there are two
device type specific options supported:</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">–net:</th><td class="field-body">for network interface cards</td>
</tr>
<tr class="field-even field"><th class="field-name">–disk:</th><td class="field-body">for disk devices</td>
</tr>
</tbody>
</table>
<p>The syntax of the device-specific options is similar to the generic
device options, but instead of specifying a device number as for
gnt-instance add, you specify the magic string add. The new device
will always be appended to the end of the list of devices of this type
for the specified instance; e.g. if the instance has disk devices 0, 1
and 2, the newly added disk device will be disk device 3.</p>
<p>Example:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>gnt-instance modify --net add:mac=auto test-instance
</pre></div>
</div>
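<p>Adding a disk device works the same way; a hedged sketch, assuming a
10 gibibyte disk is wanted:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>gnt-instance modify --disk add:size=10G test-instance
</pre></div>
</div>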
</div>
<div class="section" id="removing-devices">
<h5>Removing devices<a class="headerlink" href="#removing-devices" title="Permalink to this headline"></a></h5>
<p>Removing devices from an instance is done via gnt-instance
modify. The same device-specific options as for adding devices are
used. Instead of a device number and further device options, only the
magic string remove is specified. It will always remove the last
device in the list of devices of this type for the specified instance;
e.g. if the instance has disk devices 0, 1, 2 and 3, disk device
number 3 will be removed.</p>
<p>Example:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>gnt-instance modify --net remove test-instance
</pre></div>
</div>
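<p>Removing the last disk device follows the same pattern; a hedged
sketch:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>gnt-instance modify --disk remove test-instance
</pre></div>
</div>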
</div>
<div class="section" id="modifying-devices">
<h5>Modifying devices<a class="headerlink" href="#modifying-devices" title="Permalink to this headline"></a></h5>
<p>Modifying devices is also done with device-type specific options of
the gnt-instance modify command. Currently, two such options are
supported:</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">–net:</th><td class="field-body">for network interface cards</td>
</tr>
<tr class="field-even field"><th class="field-name">–disk:</th><td class="field-body">for disk devices</td>
</tr>
</tbody>
</table>
<p>The syntax of the device-specific options is similar to the generic
device options. The device number you specify identifies the device to
be modified.</p>
<p>Example:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="n">gnt</span><span class="o">-</span><span class="n">instance</span> <span class="n">modify</span> <span class="o">--</span><span class="n">disk</span> <span class="mi">2</span><span class="p">:</span><span class="n">access</span><span class="o">=</span><span class="n">r</span>
</pre></div>
</div>
</div>
<div class="section" id="hypervisor-options">
<h5>Hypervisor Options<a class="headerlink" href="#hypervisor-options" title="Permalink to this headline"></a></h5>
<p>Ganeti 2.0 will support more than one hypervisor. Different
hypervisors have various options that apply only to that specific
hypervisor. These hypervisor-specific options are handled via
the <code class="docutils literal"><span class="pre">--hypervisor</span></code> option. The generic syntax of the hypervisor
option is as follows:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>--hypervisor $HYPERVISOR:$OPTION=$VALUE[,$OPTION=$VALUE]
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">$HYPERVISOR:</th><td class="field-body">symbolic name of the hypervisor to use, string,
has to match the supported hypervisors. Example: xen-pvm</td>
</tr>
<tr class="field-even field"><th class="field-name">$OPTION:</th><td class="field-body">hypervisor option name, string</td>
</tr>
<tr class="field-odd field"><th class="field-name">$VALUE:</th><td class="field-body">hypervisor option value, string</td>
</tr>
</tbody>
</table>
<p>The hypervisor option for an instance can be set at instance creation
time via the <code class="docutils literal"><span class="pre">gnt-instance</span> <span class="pre">add</span></code> command. If the hypervisor for an
instance is not specified upon instance creation, the default
hypervisor will be used.</p>
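<p>A hedged sketch of setting a hypervisor option at creation time,
reusing the cdrom option name from the modification example below for
illustration only; the remaining creation arguments are elided as
[...]:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>gnt-instance add --hypervisor xen-pvm:cdrom=/srv/boot.iso [...] test-instance
</pre></div>
</div>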
</div>
<div class="section" id="modifying-hypervisor-parameters">
<h5>Modifying hypervisor parameters<a class="headerlink" href="#modifying-hypervisor-parameters" title="Permalink to this headline"></a></h5>
<p>The hypervisor parameters of an existing instance can be modified
using the <code class="docutils literal"><span class="pre">--hypervisor</span></code> option of the <code class="docutils literal"><span class="pre">gnt-instance</span> <span class="pre">modify</span></code>
command. However, the hypervisor type of an existing instance cannot
be changed; only the individual hypervisor-specific options can be
changed. Therefore, the format of the option parameters has been
simplified to omit the hypervisor name and only contain the
comma-separated list of option-value pairs.</p>
<p>Example:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span><span class="n">gnt</span><span class="o">-</span><span class="n">instance</span> <span class="n">modify</span> <span class="o">--</span><span class="n">hypervisor</span> <span class="n">cdrom</span><span class="o">=/</span><span class="n">srv</span><span class="o">/</span><span class="n">boot</span><span class="o">.</span><span class="n">iso</span><span class="p">,</span><span class="n">boot_order</span><span class="o">=</span><span class="n">cdrom</span><span class="p">:</span><span class="n">network</span> <span class="n">test</span><span class="o">-</span><span class="n">instance</span>
</pre></div>
</div>
</div>
<div class="section" id="gnt-cluster-commands">
<h5>gnt-cluster commands<a class="headerlink" href="#gnt-cluster-commands" title="Permalink to this headline"></a></h5>
<p>The gnt-cluster command will be extended to allow setting and
changing the default parameters of the cluster:</p>
<ul class="simple">
<li>The init command will be extended to support the --defaults option
to set the cluster defaults upon cluster initialization.</li>
<li>The modify command will be added to modify the cluster
parameters. It will support the --defaults option to change the
cluster defaults.</li>
</ul>
</ul>
<p><strong>Cluster defaults</strong></p>
<p>The generic format of the cluster default setting option is:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>--defaults $OPTION=$VALUE[,$OPTION=$VALUE]
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">$OPTION:</th><td class="field-body">cluster default option, string,</td>
</tr>
<tr class="field-even field"><th class="field-name">$VALUE:</th><td class="field-body">cluster default option value, string.</td>
</tr>
</tbody>
</table>
<p>Currently, the following cluster default options are defined (open to
further changes):</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">hypervisor:</th><td class="field-body">the default hypervisor to use for new instances,
string. Must be a valid hypervisor known to and supported by the
cluster.</td>
</tr>
<tr class="field-even field"><th class="field-name">disksize:</th><td class="field-body">the disksize for newly created instance disks, where
applicable. Must be either a positive number, in which case the unit
of megabyte is assumed, or a positive number followed by a supported
magnitude symbol (M for megabyte or G for gigabyte).</td>
</tr>
<tr class="field-odd field"><th class="field-name">bridge:</th><td class="field-body">the default network bridge to use for newly created instance
network interfaces, string. Must be a valid bridge name of a bridge
existing on the node(s).</td>
</tr>
</tbody>
</table>
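<p>For illustration, a hedged example of setting all three cluster
defaults at once; the bridge name br0 is hypothetical:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>gnt-cluster modify --defaults hypervisor=xen-pvm,disksize=10G,bridge=br0
</pre></div>
</div>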
</div>
<div class="section" id="hypervisor-cluster-defaults">
<h5>Hypervisor cluster defaults<a class="headerlink" href="#hypervisor-cluster-defaults" title="Permalink to this headline"></a></h5>
<p>The generic format of the hypervisor cluster wide default setting
option is:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>--hypervisor-defaults $HYPERVISOR:$OPTION=$VALUE[,$OPTION=$VALUE]
</pre></div>
</div>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">$HYPERVISOR:</th><td class="field-body">symbolic name of the hypervisor whose defaults you want
to set, string</td>
</tr>
<tr class="field-even field"><th class="field-name">$OPTION:</th><td class="field-body">cluster default option, string,</td>
</tr>
<tr class="field-odd field"><th class="field-name">$VALUE:</th><td class="field-body">cluster default option value, string.</td>
</tr>
</tbody>
</table>
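<p>For illustration, a hedged sketch of setting a hypervisor cluster
default, assuming the option is attached to gnt-cluster modify and
reusing the illustrative cdrom option from above:</p>
<div class="highlight-default"><div class="highlight"><pre><span></span>gnt-cluster modify --hypervisor-defaults xen-pvm:cdrom=/srv/boot.iso
</pre></div>
</div>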
</div>
</div>
</div>
</div>
</div>


          </div>
        </div>
      </div>
      <div class="sphinxsidebar" role="navigation" aria-label="main navigation">
        <div class="sphinxsidebarwrapper">
  <h3><a href="index.html">Table Of Contents</a></h3>
  <ul>
<li><a class="reference internal" href="#">Ganeti 2.0 design</a><ul>
<li><a class="reference internal" href="#objective">Objective</a></li>
<li><a class="reference internal" href="#background">Background</a><ul>
<li><a class="reference internal" href="#scalability-problems">Scalability problems</a></li>
<li><a class="reference internal" href="#artificial-restrictions">Artificial restrictions</a></li>
<li><a class="reference internal" href="#architecture-issues">Architecture issues</a></li>
</ul>
</li>
<li><a class="reference internal" href="#overview">Overview</a></li>
<li><a class="reference internal" href="#detailed-design">Detailed design</a><ul>
<li><a class="reference internal" href="#core-changes">Core changes</a><ul>
<li><a class="reference internal" href="#master-daemon">Master daemon</a><ul>
<li><a class="reference internal" href="#the-luxi-protocol">The LUXI protocol</a></li>
<li><a class="reference internal" href="#master-daemon-implementation">Master daemon implementation</a></li>
<li><a class="reference internal" href="#master-startup-failover">Master startup/failover</a></li>
<li><a class="reference internal" href="#logging">Logging</a></li>
<li><a class="reference internal" href="#node-daemon-changes">Node daemon changes</a></li>
<li><a class="reference internal" href="#caveats">Caveats</a></li>
</ul>
</li>
<li><a class="reference internal" href="#granular-locking">Granular locking</a><ul>
<li><a class="reference internal" href="#library-details">Library details</a></li>
<li><a class="reference internal" href="#the-locks">The Locks</a></li>
<li><a class="reference internal" href="#handling-conversion-to-more-granularity">Handling conversion to more granularity</a></li>
<li><a class="reference internal" href="#adding-removing-locks">Adding/Removing locks</a></li>
<li><a class="reference internal" href="#asynchronous-operations">Asynchronous operations</a></li>
<li><a class="reference internal" href="#locking-granularity">Locking granularity</a></li>
<li><a class="reference internal" href="#code-examples">Code examples</a></li>
<li><a class="reference internal" href="#id3">Caveats</a></li>
</ul>
</li>
<li><a class="reference internal" href="#job-queue">Job Queue</a><ul>
<li><a class="reference internal" href="#job-executionlife-of-a-ganeti-job">Job execution—“Life of a Ganeti job”</a></li>
<li><a class="reference internal" href="#job-storage">Job storage</a></li>
<li><a class="reference internal" href="#queue-structure">Queue structure</a></li>
<li><a class="reference internal" href="#locking">Locking</a></li>
<li><a class="reference internal" href="#internal-rpc">Internal RPC</a></li>
<li><a class="reference internal" href="#client-rpc">Client RPC</a></li>
<li><a class="reference internal" href="#job-and-opcode-status">Job and opcode status</a></li>
<li><a class="reference internal" href="#history">History</a></li>
<li><a class="reference internal" href="#ganeti-updates">Ganeti updates</a></li>
</ul>
</li>
<li><a class="reference internal" href="#object-parameters">Object parameters</a><ul>
<li><a class="reference internal" href="#cluster-parameters">Cluster parameters</a></li>
<li><a class="reference internal" href="#node-parameters">Node parameters</a></li>
<li><a class="reference internal" href="#instance-parameters">Instance parameters</a></li>
<li><a class="reference internal" href="#parameter-validation">Parameter validation</a></li>
<li><a class="reference internal" href="#default-value-application">Default value application</a></li>
<li><a class="reference internal" href="#opcode-changes">Opcode changes</a></li>
<li><a class="reference internal" href="#id5">Caveats</a></li>
<li><a class="reference internal" href="#security">Security</a></li>
</ul>
</li>
<li><a class="reference internal" href="#node-flags">Node flags</a><ul>
<li><a class="reference internal" href="#master-candidate-flag"><em>master candidate</em> flag</a></li>
<li><a class="reference internal" href="#offline-flag"><em>offline</em> flag</a></li>
<li><a class="reference internal" href="#drained-flag"><em>drained</em> flag</a></li>
<li><a class="reference internal" href="#interaction-between-flags">Interaction between flags</a></li>
</ul>
</li>
</ul>
</li>
<li><a class="reference internal" href="#feature-changes">Feature changes</a><ul>
<li><a class="reference internal" href="#disk-handling-changes">Disk handling changes</a><ul>
<li><a class="reference internal" href="#drbd-minor-allocation">DRBD minor allocation</a></li>
<li><a class="reference internal" href="#removal-of-obsolete-device-types-md-drbd7">Removal of obsolete device types (MD, DRBD7)</a></li>
<li><a class="reference internal" href="#file-based-storage-support">File-based storage support</a></li>
<li><a class="reference internal" href="#better-lvm-allocation">Better LVM allocation</a></li>
<li><a class="reference internal" href="#drbd8-peer-authentication-at-handshake">DRBD8 peer authentication at handshake</a></li>
<li><a class="reference internal" href="#lvm-self-repair-optional">LVM self-repair (optional)</a></li>
<li><a class="reference internal" href="#failover-to-any-node">Failover to any node</a></li>
<li><a class="reference internal" href="#id6">Caveats</a></li>
</ul>
</li>
<li><a class="reference internal" href="#variable-number-of-disk-nics-per-instance">Variable number of disk/NICs per instance</a><ul>
<li><a class="reference internal" href="#variable-number-of-disks">Variable number of disks</a></li>
<li><a class="reference internal" href="#variable-number-of-nics">Variable number of NICs</a></li>
</ul>
</li>
</ul>
</li>
<li><a class="reference internal" href="#interface-changes">Interface changes</a><ul>
<li><a class="reference internal" href="#os-interface">OS interface</a><ul>
<li><a class="reference internal" href="#the-scripts">The Scripts</a><ul>
<li><a class="reference internal" href="#incompatibilities-with-1-2">Incompatibilities with 1.2</a></li>
<li><a class="reference internal" href="#input">Input</a></li>
<li><a class="reference internal" href="#output-behaviour">Output/Behaviour</a></li>
</ul>
</li>
<li><a class="reference internal" href="#other-declarative-style-features">Other declarative style features</a></li>
<li><a class="reference internal" href="#caveats-notes">Caveats/Notes</a></li>
</ul>
</li>
<li><a class="reference internal" href="#remote-api-changes">Remote API changes</a><ul>
<li><a class="reference internal" href="#design">Design</a></li>
<li><a class="reference internal" href="#resource-representation">Resource representation</a></li>
<li><a class="reference internal" href="#id7">Security</a></li>
<li><a class="reference internal" href="#id8">Caveats</a></li>
</ul>
</li>
<li><a class="reference internal" href="#command-line-changes">Command line changes</a><ul>
<li><a class="reference internal" href="#notes-about-device-removal-addition">Notes about device removal/addition</a></li>
<li><a class="reference internal" href="#gnt-instance-commands">gnt-instance commands</a></li>
<li><a class="reference internal" href="#network-device-options">Network Device Options</a></li>
<li><a class="reference internal" href="#disk-device-options">Disk Device Options</a></li>
<li><a class="reference internal" href="#adding-devices">Adding devices</a></li>
<li><a class="reference internal" href="#removing-devices">Removing devices</a></li>
<li><a class="reference internal" href="#modifying-devices">Modifying devices</a></li>
<li><a class="reference internal" href="#hypervisor-options">Hypervisor Options</a></li>
<li><a class="reference internal" href="#modifying-hypervisor-parameters">Modifying hypervisor parameters</a></li>
<li><a class="reference internal" href="#gnt-cluster-commands">gnt-cluster commands</a></li>
<li><a class="reference internal" href="#hypervisor-cluster-defaults">Hypervisor cluster defaults</a></li>
</ul>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</li>
</ul>

<div id="searchbox" style="display: none" role="search">
  <h3>Quick search</h3>
    <form class="search" action="search.html" method="get">
      <div><input type="text" name="q" /></div>
      <div><input type="submit" value="Go" /></div>
      <input type="hidden" name="check_keywords" value="yes" />
      <input type="hidden" name="area" value="default" />
    </form>
</div>
<script type="text/javascript">$('#searchbox').show(0);</script>
        </div>
      </div>
      <div class="clearer"></div>
    </div>
    <div class="related" role="navigation" aria-label="related navigation">
      <h3>Navigation</h3>
      <ul>
        <li class="right" style="margin-right: 10px">
          <a href="design-2.1.html" title="Ganeti 2.1 design"
             >next</a></li>
        <li class="right" >
          <a href="index.html" title="Welcome to Ganeti’s documentation!"
             >previous</a> |</li>
        <li class="nav-item nav-item-0"><a href="index.html">Ganeti 2.16.0~rc2 documentation</a> &#187;</li> 
      </ul>
    </div>
    <div class="footer" role="contentinfo">
        &#169; Copyright 2018, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Google Inc.
      Created using <a href="http://sphinx-doc.org/">Sphinx</a> 1.6.7.
    </div>
  </body>
</html>