<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
  <!--
        This file is autogenerated from migration.html.in
        Do not edit this file. Changes will be lost.
      -->
  <!--
        This page was generated at Fri Jan 12 14:53:45 UTC 2018.
      -->
  <head>
    <meta charset="UTF-8"/>
    <meta name="viewport" content="width=device-width, initial-scale=1"/>
    <link rel="stylesheet" type="text/css" href="main.css"/>
    <link rel="apple-touch-icon" sizes="180x180" href="/apple-touch-icon.png"/>
    <link rel="icon" type="image/png" sizes="32x32" href="/favicon-32x32.png"/>
    <link rel="icon" type="image/png" sizes="16x16" href="/favicon-16x16.png"/>
    <link rel="manifest" href="/manifest.json"/>
    <meta name="theme-color" content="#ffffff"/>
    <title>libvirt: Guest migration</title>
    <meta name="description" content="libvirt, virtualization, virtualization API"/>
    <script type="text/javascript">
      <!--
          
      function init() {
      window.addEventListener('scroll', function(e){
              var distanceY = window.pageYOffset || document.documentElement.scrollTop,
                  shrinkOn = 94,
                  home = document.getElementById("home"),
                  links = document.getElementById("jumplinks"),
                  search = document.getElementById("search"),
                  body = document.getElementById("body");
              if (distanceY > shrinkOn) {
                  if (home.className != "navhide") {
                      body.className = "navhide"
                      home.className = "navhide"
                      links.className = "navhide"
                      search.className = "navhide"
                  }
              } else {
                  if (home.className == "navhide") {
                      body.className = ""
                      home.className = ""
                      links.className = ""
                      search.className = ""
                  }
              }
      });
      }
      window.onload = init;
           
          -->
    </script>
  </head>
  <body>
    <div id="body">
      <div id="content">
        <h1>Guest migration</h1>
        <ul>
          <li>
            <a href="#transport">Network data transports</a>
            <ul>
              <li>
                <a href="#transportnative">Hypervisor native transport</a>
              </li>
              <li>
                <a href="#transporttunnel">libvirt tunnelled transport</a>
              </li>
            </ul>
          </li>
          <li>
            <a href="#flow">Communication control paths/flows</a>
            <ul>
              <li>
                <a href="#flowmanageddirect">Managed direct migration</a>
              </li>
              <li>
                <a href="#flowpeer2peer">Managed peer to peer migration</a>
              </li>
              <li>
                <a href="#flowunmanageddirect">Unmanaged direct migration</a>
              </li>
            </ul>
          </li>
          <li>
            <a href="#security">Data security</a>
          </li>
          <li>
            <a href="#offline">Offline migration</a>
          </li>
          <li>
            <a href="#uris">Migration URIs</a>
          </li>
          <li>
            <a href="#config">Configuration file handling</a>
          </li>
          <li>
            <a href="#scenarios">Migration scenarios</a>
            <ul>
              <li>
                <a href="#scenarionativedirect">Native migration, client to two libvirtd servers</a>
              </li>
              <li>
                <a href="#scenarionativepeer2peer">Native migration, client to and peer2peer between, two libvirtd servers</a>
              </li>
              <li>
                <a href="#scenariotunnelpeer2peer1">Tunnelled migration, client and peer2peer between two libvirtd servers</a>
              </li>
              <li>
                <a href="#nativedirectunmanaged">Native migration, client to one libvirtd server</a>
              </li>
              <li>
                <a href="#nativepeer2peer">Native migration, peer2peer between two libvirtd servers</a>
              </li>
              <li>
                <a href="#scenariotunnelpeer2peer2">Tunnelled migration, peer2peer between two libvirtd servers</a>
              </li>
            </ul>
          </li>
        </ul>
        <p>
      Migration of guests between hosts is a complicated problem with many possible
      solutions, each with its own positive and negative points. For maximum
      flexibility of both hypervisor integration and administrator deployment,
      libvirt implements several options for migration.
    </p>
        <h2>
          <a id="transport">Network data transports</a>
          <a class="headerlink" href="#transport" title="Permalink to this headline"></a>
        </h2>
        <p>
      There are two options for the data transport used during migration, either
      the hypervisor's own <strong>native</strong> transport, or <strong>tunnelled</strong>
      over a libvirtd connection.
    </p>
        <h3>
          <a id="transportnative">Hypervisor native transport</a>
          <a class="headerlink" href="#transportnative" title="Permalink to this headline"></a>
        </h3>
        <p><em>Native</em> data transports may or may not support encryption, depending
      on the hypervisor in question, but will typically have the lowest computational costs
      by minimising the number of data copies involved. The native data transports will also
      require extra hypervisor-specific network configuration steps by the administrator when
      deploying a host. For some hypervisors, it might be necessary to open up a large range
      of ports on the firewall to allow multiple concurrent migration operations.
    </p>
        <p>
          <img class="diagram" src="migration-native.png" alt="Migration native path"/>
        </p>
        <h3>
          <a id="transporttunnel">libvirt tunnelled transport</a>
          <a class="headerlink" href="#transporttunnel" title="Permalink to this headline"></a>
        </h3>
        <p><em>Tunnelled</em> data transports will always be capable of strong encryption
      since they are able to leverage the capabilities built in to the libvirt RPC protocol.
      The downside of a tunnelled transport, however, is that there will be extra data copies
      involved on both the source and destination hosts as the data is moved between libvirtd
      and the hypervisor. This is likely to be a more significant problem for guests with
      very large RAM sizes, which dirty memory pages quickly. On the deployment side, tunnelled
      transports do not require any extra network configuration over and above what's already
      required for general libvirtd <a href="remote.html">remote access</a>, and there is only
      need for a single port to be open on the firewall to support multiple concurrent
      migration operations.
    </p>
        <p>
          <img class="diagram" src="migration-tunnel.png" alt="Migration tunnel path"/>
        </p>
        <h2>
          <a id="flow">Communication control paths/flows</a>
          <a class="headerlink" href="#flow" title="Permalink to this headline"></a>
        </h2>
        <p>
      Migration of virtual machines requires close co-ordination of the two
      hosts involved, as well as the application invoking the migration,
      which may be on the source, the destination, or a third host.
    </p>
        <h3>
          <a id="flowmanageddirect">Managed direct migration</a>
          <a class="headerlink" href="#flowmanageddirect" title="Permalink to this headline"></a>
        </h3>
        <p>
      With <em>managed direct</em> migration, the libvirt client process
      controls the various phases of migration. The client application must
      be able to connect and authenticate with the libvirtd daemons on both
      the source and destination hosts. There is no need for the two libvirtd
      daemons to communicate with each other. If the client application
      crashes, or otherwise loses its connection to libvirtd during the
      migration process, an attempt will be made to abort the migration and
      restart the guest CPUs on the source host. There may be scenarios
      where this cannot be safely done, in which cases the guest will be
      left paused on one or both of the hosts.
    </p>
        <p>
          <img class="diagram" src="migration-managed-direct.png" alt="Migration direct, managed"/>
        </p>
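        <p>
      As a rough illustration of this flow using the C API (not taken from the
      libvirt examples; the guest name <code>web1</code> and the host names are
      placeholders), the client opens connections to both libvirtd daemons and
      drives the migration itself with <code>virDomainMigrate</code>, without
      the VIR_MIGRATE_PEER2PEER flag:
    </p>
        <pre>
/* Managed direct migration sketch (host and guest names are illustrative).
 * Build with: cc migrate-direct.c -lvirt */
#include &lt;stdio.h&gt;
#include &lt;libvirt/libvirt.h&gt;

int main(void)
{
    /* The client authenticates to BOTH libvirtd daemons itself. */
    virConnectPtr src = virConnectOpen("qemu+ssh://srchost/system");
    virConnectPtr dst = virConnectOpen("qemu+ssh://desthost/system");
    virDomainPtr dom = src ? virDomainLookupByName(src, "web1") : NULL;

    if (!src || !dst || !dom) {
        fprintf(stderr, "setup failed: %s\n", virGetLastErrorMessage());
        return 1;
    }

    /* No VIR_MIGRATE_PEER2PEER: the client drives every phase. The optional
     * hypervisor specific URI (5th argument) is left NULL, so libvirt derives
     * it from the destination's configured hostname. */
    virDomainPtr migrated = virDomainMigrate(dom, dst, VIR_MIGRATE_LIVE,
                                             NULL, NULL, 0);
    if (!migrated) {
        fprintf(stderr, "migration failed: %s\n", virGetLastErrorMessage());
        return 1;
    }

    virDomainFree(migrated);
    virDomainFree(dom);
    virConnectClose(dst);
    virConnectClose(src);
    return 0;
}
    </pre>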
        <h3>
          <a id="flowpeer2peer">Managed peer to peer migration</a>
          <a class="headerlink" href="#flowpeer2peer" title="Permalink to this headline"></a>
        </h3>
        <p>
      With <em>peer to peer</em> migration, the libvirt client process only
      talks to the libvirtd daemon on the source host. The source libvirtd
      daemon controls the entire migration process itself, by directly
      connecting to the destination host's libvirtd daemon. If the client application crashes,
      or otherwise loses its connection to libvirtd, the migration process
      will continue uninterrupted until completion.  Note that the
      source libvirtd uses its own credentials (typically root) to
      connect to the destination, rather than the credentials used
      by the client to connect to the source; if these differ, it is
      common to run into a situation where a client can connect to the
      destination directly but the source cannot make the connection to
      set up the peer-to-peer migration.
    </p>
        <p>
          <img class="diagram" src="migration-managed-p2p.png" alt="Migration peer-to-peer"/>
        </p>
        <h3>
          <a id="flowunmanageddirect">Unmanaged direct migration</a>
          <a class="headerlink" href="#flowunmanageddirect" title="Permalink to this headline"></a>
        </h3>
        <p>
      With <em>unmanaged direct</em> migration, neither the libvirt client
      nor the libvirtd daemon controls the migration process. Control is instead
      delegated to the hypervisor's own management services (if any). The
      libvirt client merely initiates the migration via the hypervisor's
      management layer. If the libvirt client or libvirtd crash, the
      migration process will continue uninterrupted until completion.
    </p>
        <p>
          <img class="diagram" src="migration-unmanaged-direct.png" alt="Migration direct, unmanaged"/>
        </p>
        <h2>
          <a id="security">Data security</a>
          <a class="headerlink" href="#security" title="Permalink to this headline"></a>
        </h2>
        <p>
      Since the migration data stream includes a complete copy of the guest
      OS RAM, snooping of the migration data stream may allow compromise
      of sensitive guest information. If the virtualization hosts have
      multiple network interfaces, or if the network switches support
      tagged VLANs, then it is very desirable to separate guest network
      traffic from migration or management traffic.
    </p>
        <p>
      In some scenarios, even a separate network for migration data may
      not offer sufficient security. In this case it is possible to apply
      encryption to the migration data stream. If the hypervisor does not
      itself offer encryption, then the libvirt tunnelled migration
      facility should be used.
    </p>
        <h2>
          <a id="offline">Offline migration</a>
          <a class="headerlink" href="#offline" title="Permalink to this headline"></a>
        </h2>
        <p>
      Offline migration transfers the inactive definition of a domain
      (which may or may not be active). After successful completion, the
      domain remains in its current state on the source host and is defined
      but inactive on the destination host. It's a bit more clever than
      <code>virsh dumpxml</code> on the source host followed by
      <code>virsh define</code> on the destination host, as offline migration
      will run the pre-migration hook to update the domain XML on the
      destination host. Currently, copying non-shared storage or other
      file-based storage (e.g. UEFI variable storage) is not supported during
      offline migration.
    </p>
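        <p>
      As a rough illustration (guest and host names are placeholders), an
      offline migration that also leaves a persistent definition on the
      destination host can be requested from <code>virsh</code> by combining
      the <code>--offline</code> and <code>--persistent</code> flags:
    </p>
        <pre>
virsh migrate --offline --persistent web1 qemu+ssh://desthost/system
    </pre>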
        <h2>
          <a id="uris">Migration URIs</a>
          <a class="headerlink" href="#uris" title="Permalink to this headline"></a>
        </h2>
        <p>
      Initiating a guest migration requires the client application to
      specify up to three URIs, depending on the choice of control
      flow and/or APIs used. The first URI is that of the libvirt
      connection to the source host, where the virtual guest is
      currently running. The second URI is that of the libvirt
      connection to the destination host, where the virtual guest
      will be moved to (and in peer-to-peer migrations, this is from
      the perspective of the source, not the client). The third URI is
      a hypervisor specific
      URI used to control how the guest will be migrated. With
      any managed migration flow, the first and second URIs are
      compulsory, while the third URI is optional. With the
      unmanaged direct migration mode, the first and third URIs are
      compulsory and the second URI is not used.
    </p>
        <p>
      Ordinarily, management applications only need to care about the
      first and second URIs, which are both in the normal libvirt
      connection URI format. Libvirt will then automatically determine
      the hypervisor specific URI by looking up the target host's
      configured hostname. There are a few scenarios where the management
      application may wish to have direct control over the third URI.
    </p>
        <ol>
          <li>The configured hostname is incorrect, or DNS is broken. If a
        host has a hostname which will not resolve to match one of its
        public IP addresses, then libvirt will generate an incorrect
        URI. In this case the management application should specify the
        hypervisor specific URI explicitly, using an IP address, or a
        correct hostname.</li>
          <li>The host has multiple network interfaces. If a host has multiple
        network interfaces, it might be desirable for the migration data
        stream to be sent over a specific interface for either security
        or performance reasons. In this case the management application
        should specify the hypervisor specific URI, using an IP address
        associated with the network to be used.</li>
          <li>The firewall restricts what ports are available. When libvirt
        generates a migration URI it will pick a port number using hypervisor
        specific rules. Some hypervisors only require a single port to be
        open in the firewalls, while others require a whole range of port
        numbers. In the latter case the management application may wish
        to choose a specific port number outside the default range in order
        to comply with local firewall policies.</li>
        </ol>
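        <p>
      To illustrate the last two cases (addresses and the port number are
      examples only), the hypervisor specific URI can be given as an extra
      argument to <code>virsh migrate</code>; with QEMU this is typically a
      <code>tcp://</code> URI naming the address, and optionally the port, on
      which the destination should accept the migration data:
    </p>
        <pre>
eg forcing the migration data over a specific interface

virsh migrate web1 qemu+ssh://desthost/system tcp://10.0.0.1/


eg additionally pinning the port to satisfy a firewall policy

virsh migrate web1 qemu+ssh://desthost/system tcp://10.0.0.1:49152
    </pre>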
        <h2>
          <a id="config">Configuration file handling</a>
          <a class="headerlink" href="#config" title="Permalink to this headline"></a>
        </h2>
        <p>
      There are two types of virtual machines known to libvirt. A <em>transient</em>
      guest only exists while it is running, and has no configuration file stored
      on disk. A <em>persistent</em> guest maintains a configuration file on disk
      even when it is not running.
    </p>
        <p>
      By default, a migration operation will not attempt to modify any configuration
      files that may be stored on either the source or destination host. It is the
      responsibility of the administrator, or the management application, to manage
      distribution of configuration files (if desired). It is important to note that the <code>/etc/libvirt</code>
      directory <strong>MUST NEVER BE SHARED BETWEEN HOSTS</strong>. There are some
      typical scenarios that might be applicable:
    </p>
        <ul>
          <li>Centralized configuration files outside libvirt, in shared storage. A
        cluster-aware management application may maintain all the master guest configuration
        files in a cluster filesystem. When attempting to start a guest, the config
        will be read from the cluster FS and used to deploy a persistent guest.
        For migration the configuration will need to be copied to the destination
        host and removed on the original.
      </li>
          <li>Centralized configuration files outside libvirt, in a database. A data center
        management application may not store configuration files at all. Instead it
        may generate libvirt XML on the fly when a guest is booted. It will typically
        use transient guests, and thus not have to consider configuration files during
        migration.
      </li>
          <li>Distributed configuration inside libvirt. The configuration file for each
        guest is copied to every host where the guest is able to run. Upon migration
        the existing config merely needs to be updated with any changes.
      </li>
          <li>Ad-hoc configuration management inside libvirt. Each guest is tied to a
        specific host and rarely migrated. When migration is required, the config
        is moved from one host to the other.
      </li>
        </ul>
        <p>
      As mentioned above, libvirt will not modify configuration files during
      migration by default. The <code>virsh</code> command has two flags to
      influence this behaviour. The <code>--undefinesource</code> flag
      will cause the configuration file to be removed on the source host
      after a successful migration. The <code>--persistent</code> flag will
      cause a configuration file to be created on the destination host
      after a successful migration. The following table summarizes the
      configuration file handling in all possible state and flag
      combinations.
    </p>
        <table class="data">
          <thead>
            <tr class="head">
              <th colspan="3">Before migration</th>
              <th colspan="2">Flags</th>
              <th colspan="3">After migration</th>
            </tr>
            <tr class="subhead">
              <th>Source type</th>
              <th>Source config</th>
              <th>Dest config</th>
              <th>--undefinesource</th>
              <th>--persistent</th>
              <th>Dest type</th>
              <th>Source config</th>
              <th>Dest config</th>
            </tr>
          </thead>
          <tbody>
            <!-- src:N, dst:N -->
            <tr>
              <td>Transient</td>
              <td class="n">N</td>
              <td class="n">N</td>
              <td class="n">N</td>
              <td class="n">N</td>
              <td>Transient</td>
              <td class="n">N</td>
              <td class="n">N</td>
            </tr>
            <tr>
              <td>Transient</td>
              <td class="n">N</td>
              <td class="n">N</td>
              <td class="y">Y</td>
              <td class="n">N</td>
              <td>Transient</td>
              <td class="n">N</td>
              <td class="n">N</td>
            </tr>
            <tr>
              <td>Transient</td>
              <td class="n">N</td>
              <td class="n">N</td>
              <td class="n">N</td>
              <td class="y">Y</td>
              <td>Persistent</td>
              <td class="n">N</td>
              <td class="y">Y</td>
            </tr>
            <tr>
              <td>Transient</td>
              <td class="n">N</td>
              <td class="n">N</td>
              <td class="y">Y</td>
              <td class="y">Y</td>
              <td>Persistent</td>
              <td class="n">N</td>
              <td class="y">Y</td>
            </tr>
            <!-- src:N, dst:Y -->
            <tr>
              <td>Transient</td>
              <td class="n">N</td>
              <td class="y">Y</td>
              <td class="n">N</td>
              <td class="n">N</td>
              <td>Persistent</td>
              <td class="n">N</td>
              <td class="y">Y<br/>(unchanged dest config)</td>
            </tr>
            <tr>
              <td>Transient</td>
              <td class="n">N</td>
              <td class="y">Y</td>
              <td class="y">Y</td>
              <td class="n">N</td>
              <td>Persistent</td>
              <td class="n">N</td>
              <td class="y">Y<br/>(unchanged dest config)</td>
            </tr>
            <tr>
              <td>Transient</td>
              <td class="n">N</td>
              <td class="y">Y</td>
              <td class="n">N</td>
              <td class="y">Y</td>
              <td>Persistent</td>
              <td class="n">N</td>
              <td class="y">Y<br/>(replaced with source)</td>
            </tr>
            <tr>
              <td>Transient</td>
              <td class="n">N</td>
              <td class="y">Y</td>
              <td class="y">Y</td>
              <td class="y">Y</td>
              <td>Persistent</td>
              <td class="n">N</td>
              <td class="y">Y<br/>(replaced with source)</td>
            </tr>
            <!-- src:Y dst:N -->
            <tr>
              <td>Persistent</td>
              <td class="y">Y</td>
              <td class="n">N</td>
              <td class="n">N</td>
              <td class="n">N</td>
              <td>Transient</td>
              <td class="y">Y</td>
              <td class="n">N</td>
            </tr>
            <tr>
              <td>Persistent</td>
              <td class="y">Y</td>
              <td class="n">N</td>
              <td class="y">Y</td>
              <td class="n">N</td>
              <td>Transient</td>
              <td class="n">N</td>
              <td class="n">N</td>
            </tr>
            <tr>
              <td>Persistent</td>
              <td class="y">Y</td>
              <td class="n">N</td>
              <td class="n">N</td>
              <td class="y">Y</td>
              <td>Persistent</td>
              <td class="y">Y</td>
              <td class="y">Y</td>
            </tr>
            <tr>
              <td>Persistent</td>
              <td class="y">Y</td>
              <td class="n">N</td>
              <td class="y">Y</td>
              <td class="y">Y</td>
              <td>Persistent</td>
              <td class="n">N</td>
              <td class="y">Y</td>
            </tr>
            <!-- src:Y dst:Y -->
            <tr>
              <td>Persistent</td>
              <td class="y">Y</td>
              <td class="y">Y</td>
              <td class="n">N</td>
              <td class="n">N</td>
              <td>Persistent</td>
              <td class="y">Y</td>
              <td class="y">Y<br/>(unchanged dest config)</td>
            </tr>
            <tr>
              <td>Persistent</td>
              <td class="y">Y</td>
              <td class="y">Y</td>
              <td class="y">Y</td>
              <td class="n">N</td>
              <td>Persistent</td>
              <td class="n">N</td>
              <td class="y">Y<br/>(unchanged dest config)</td>
            </tr>
            <tr>
              <td>Persistent</td>
              <td class="y">Y</td>
              <td class="y">Y</td>
              <td class="n">N</td>
              <td class="y">Y</td>
              <td>Persistent</td>
              <td class="y">Y</td>
              <td class="y">Y<br/>(replaced with source)</td>
            </tr>
            <tr>
              <td>Persistent</td>
              <td class="y">Y</td>
              <td class="y">Y</td>
              <td class="y">Y</td>
              <td class="y">Y</td>
              <td>Persistent</td>
              <td class="n">N</td>
              <td class="y">Y<br/>(replaced with source)</td>
            </tr>
          </tbody>
        </table>
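        <p>
      As an example (guest and host names are illustrative), the final row of
      the table, where a persistent guest ends up defined only on the
      destination host, corresponds to passing both flags together with an
      ordinary live migration:
    </p>
        <pre>
virsh migrate --live --persistent --undefinesource web1 qemu+ssh://desthost/system
    </pre>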
        <h2>
          <a id="scenarios">Migration scenarios</a>
          <a class="headerlink" href="#scenarios" title="Permalink to this headline"></a>
        </h2>
        <h3>
          <a id="scenarionativedirect">Native migration, client to two libvirtd servers</a>
          <a class="headerlink" href="#scenarionativedirect" title="Permalink to this headline"></a>
        </h3>
        <p>
      At an API level this requires use of virDomainMigrate, without the
      VIR_MIGRATE_PEER2PEER flag set. The destination libvirtd server
      will automatically determine the native hypervisor URI for migration
      based off the primary hostname. To force migration over an alternate
      network interface the optional hypervisor specific URI must be provided
    </p>
        <pre>
syntax: virsh migrate GUESTNAME DEST-LIBVIRT-URI [HV-URI]


eg using default network interface

virsh migrate web1 qemu+ssh://desthost/system
virsh migrate web1 xen+tls://desthost/system


eg using secondary network interface

virsh migrate web1 qemu://desthost/system tcp://10.0.0.1/
virsh migrate web1 xen+tcp://desthost/system xenmigr://10.0.0.1/
    </pre>
        <p>
      Supported by Xen, QEMU, VMware and VirtualBox drivers
    </p>
        <h3>
          <a id="scenarionativepeer2peer">Native migration, client to and peer2peer between, two libvirtd servers</a>
          <a class="headerlink" href="#scenarionativepeer2peer" title="Permalink to this headline"></a>
        </h3>
        <p>
      virDomainMigrate, with the VIR_MIGRATE_PEER2PEER flag set,
      using the libvirt URI format for the 'uri' parameter. The
      destination libvirtd server will automatically determine
      the native hypervisor URI for migration, based off the
      primary hostname. The optional uri parameter controls how
      the source libvirtd connects to the destination libvirtd,
      in case it is not accessible using the same address that
      the client uses to connect to the destination, or a different
      encryption/auth scheme is required. There is no
      scope for forcing an alternative network interface for the
      native migration data with this method.
    </p>
        <p>
      This mode cannot be invoked from virsh
    </p>
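        <p>
      Since this mode has no virsh equivalent, the fragment below sketches it
      with the C API (host and guest names are placeholders). It reuses the
      connection and domain lookup boilerplate shown earlier; the difference is
      that the 'uri' argument now carries a libvirt connection URI for the
      destination, which the <em>source</em> libvirtd uses to make the
      peer-to-peer connection:
    </p>
        <pre>
/* dconn (dst) is still supplied, but VIR_MIGRATE_PEER2PEER makes the source
 * libvirtd contact the destination itself using the given libvirt URI. */
virDomainPtr migrated =
    virDomainMigrate(dom, dst,
                     VIR_MIGRATE_LIVE | VIR_MIGRATE_PEER2PEER,
                     NULL,                          /* keep the guest name */
                     "qemu+tls://desthost/system",  /* libvirt URI, not a
                                                       hypervisor URI */
                     0);                            /* no bandwidth limit */
    </pre>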
        <p>
      Supported by QEMU driver
    </p>
        <h3>
          <a id="scenariotunnelpeer2peer1">Tunnelled migration, client and peer2peer between two libvirtd servers</a>
          <a class="headerlink" href="#scenariotunnelpeer2peer1" title="Permalink to this headline"></a>
        </h3>
        <p>
      virDomainMigrate, with the VIR_MIGRATE_PEER2PEER &amp; VIR_MIGRATE_TUNNELLED
      flags set, using the libvirt URI format for the 'uri' parameter. The
      destination libvirtd server will automatically determine
      the native hypervisor URI for migration, based off the
      primary hostname. The optional uri parameter controls how
      the source libvirtd connects to the destination libvirtd,
      in case it is not accessible using the same address that
      the client uses to connect to the destination, or a different
      encryption/auth scheme is required. The native hypervisor URI
      format is not used at all.
    </p>
        <p>
      This mode cannot be invoked from virsh
    </p>
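        <p>
      Again there is no virsh equivalent, so a C sketch along the lines of the
      previous fragment (placeholder names) simply adds the
      VIR_MIGRATE_TUNNELLED flag:
    </p>
        <pre>
/* As before, but VIR_MIGRATE_TUNNELLED routes the migration data itself over
 * the libvirtd-to-libvirtd connection, so no extra hypervisor ports need to
 * be opened between the hosts. */
virDomainPtr migrated =
    virDomainMigrate(dom, dst,
                     VIR_MIGRATE_LIVE | VIR_MIGRATE_PEER2PEER |
                     VIR_MIGRATE_TUNNELLED,
                     NULL, "qemu+tls://desthost/system", 0);
    </pre>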
        <p>
      Supported by QEMU driver
    </p>
        <h3>
          <a id="nativedirectunmanaged">Native migration, client to one libvirtd server</a>
          <a class="headerlink" href="#nativedirectunmanaged" title="Permalink to this headline"></a>
        </h3>
        <p>
      virDomainMigrateToURI, without the VIR_MIGRATE_PEER2PEER flag set,
      using a hypervisor specific URI format for the 'uri' parameter.
      There is no use or requirement for a destination libvirtd instance
      at all. This is typically used when the hypervisor has its own
      native management daemon available to handle incoming migration
      attempts on the destination.
    </p>
        <pre>
syntax: virsh migrate GUESTNAME HV-URI


eg using the hypervisor's native migration URI

virsh migrate --direct web1 xenmigr://desthost/
    </pre>
        <p>
      Supported by Xen driver
    </p>
        <h3>
          <a id="nativepeer2peer">Native migration, peer2peer between two libvirtd servers</a>
          <a class="headerlink" href="#nativepeer2peer" title="Permalink to this headline"></a>
        </h3>
        <p>
      virDomainMigrateToURI, with the VIR_MIGRATE_PEER2PEER flag set,
      using the libvirt URI format for the 'uri' parameter. The
      destination libvirtd server will automatically determine
      the native hypervisor URI for migration, based off the
      primary hostname. There is no scope for forcing an alternative
      network interface for the native migration data with this
      method.  The destination URI must be reachable using the source
      libvirtd credentials (which are not necessarily the same as the
      credentials of the client in connecting to the source).
    </p>
        <pre>
syntax: virsh migrate GUESTNAME DEST-LIBVIRT-URI [ALT-DEST-LIBVIRT-URI]


eg using same libvirt URI for all connections

virsh migrate --p2p web1 qemu+ssh://desthost/system


eg using different libvirt URI auth scheme for peer2peer connections

virsh migrate --p2p web1 qemu+ssh://desthost/system qemu+tls://desthost/system


eg using different libvirt URI hostname for peer2peer connections

virsh migrate --p2p web1 qemu+ssh://desthost/system qemu+ssh://10.0.0.1/system
    </pre>
        <p>
      Supported by the QEMU driver
    </p>
        <h3>
          <a id="scenariotunnelpeer2peer2">Tunnelled migration, peer2peer between two libvirtd servers</a>
          <a class="headerlink" href="#scenariotunnelpeer2peer2" title="Permalink to this headline"></a>
        </h3>
        <p>
      virDomainMigrateToURI, with the VIR_MIGRATE_PEER2PEER &amp; VIR_MIGRATE_TUNNELLED
      flags set, using the libvirt URI format for the 'uri' parameter. The
      destination libvirtd server will automatically determine
      the native hypervisor URI for migration, based off the
      primary hostname. The optional uri parameter controls how
      the source libvirtd connects to the destination libvirtd,
      in case it is not accessible using the same address that
      the client uses to connect to the destination, or a different
      encryption/auth scheme is required. The native hypervisor URI
      format is not used at all.  The destination URI must be
      reachable using the source libvirtd credentials (which are not
      necessarily the same as the credentials of the client in
      connecting to the source).
    </p>
        <pre>
syntax: virsh migrate GUESTNAME DEST-LIBVIRT-URI [ALT-DEST-LIBVIRT-URI]


eg using same libvirt URI for all connections

virsh migrate --p2p --tunnelled web1 qemu+ssh://desthost/system


eg using different libvirt URI auth scheme for peer2peer connections

virsh migrate --p2p --tunnelled web1 qemu+ssh://desthost/system qemu+tls://desthost/system


eg using different libvirt URI hostname for peer2peer connections

virsh migrate --p2p --tunnelled web1 qemu+ssh://desthost/system qemu+ssh://10.0.0.1/system
    </pre>
        <p>
      Supported by QEMU driver
    </p>
      </div>
    </div>
    <div id="nav">
      <div id="home">
        <a href="index.html">Home</a>
      </div>
      <div id="jumplinks">
        <ul>
          <li>
            <a href="downloads.html">Download</a>
          </li>
          <li>
            <a href="contribute.html">Contribute</a>
          </li>
          <li>
            <a href="docs.html">Docs</a>
          </li>
        </ul>
      </div>
      <div id="search">
        <form action="search.php" enctype="application/x-www-form-urlencoded" method="get">
          <div>
            <input name="query" type="text" size="12" value=""/>
            <input name="submit" type="submit" value="Go"/>
          </div>
        </form>
      </div>
    </div>
    <div id="footer">
      <div id="contact">
        <h3>Contact</h3>
        <ul>
          <li>
            <a href="contact.html#email">email</a>
          </li>
          <li>
            <a href="contact.html#irc">irc</a>
          </li>
        </ul>
      </div>
      <div id="community">
        <h3>Community</h3>
        <ul>
          <li>
            <a href="https://twitter.com/hashtag/libvirt">twitter</a>
          </li>
          <li>
            <a href="https://plus.google.com/communities/109522598353007505282">google+</a>
          </li>
          <li>
            <a href="http://stackoverflow.com/questions/tagged/libvirt">stackoverflow</a>
          </li>
          <li>
            <a href="http://serverfault.com/questions/tagged/libvirt">serverfault</a>
          </li>
        </ul>
      </div>
      <div id="conduct">
            Participants in the libvirt project agree to abide by <a href="governance.html#codeofconduct">the project code of conduct</a></div>
      <br class="clear"/>
    </div>
  </body>
</html>