Merge lp:~ubuntu-branches/ubuntu/natty/libvirt/natty-201011111816 into lp:ubuntu/natty/libvirt

Proposed by James Westby
Status: Work in progress
Proposed branch: lp:~ubuntu-branches/ubuntu/natty/libvirt/natty-201011111816
Merge into: lp:ubuntu/natty/libvirt
Diff against target: 58849 lines (+51972/-2979) (has conflicts)
133 files modified
.pc/.quilt_patches (+1/-0)
.pc/.quilt_series (+1/-0)
.pc/0001-remove-RHism.diff.patch/tools/virsh.pod (+1220/-0)
.pc/0003-allow-libvirt-group-to-access-the-socket.patch/daemon/libvirtd.conf (+346/-0)
.pc/0004-fix-Debian-specific-path-to-hvm-loader.patch/src/xen/xen_hypervisor.c (+3588/-0)
.pc/0004-fix-Debian-specific-path-to-hvm-loader.patch/tests/xencapsdata/xen-i686-pae-hvm.xml (+0/-50)
.pc/0004-fix-Debian-specific-path-to-hvm-loader.patch/tests/xencapsdata/xen-ia64-be-hvm.xml (+0/-46)
.pc/0004-fix-Debian-specific-path-to-hvm-loader.patch/tests/xencapsdata/xen-ia64-hvm.xml (+0/-42)
.pc/0004-fix-Debian-specific-path-to-hvm-loader.patch/tests/xencapsdata/xen-x86_64-hvm.xml (+0/-63)
.pc/0006-patch-qemuMonitorTextGetMigrationStatus-to-intercept.patch/src/qemu/qemu_monitor_text.c (+2591/-0)
.pc/9000-delayed_iff_up_bridge.patch/src/util/bridge.c (+783/-0)
.pc/9001-dont_clobber_existing_bridges.patch/src/network/default.xml (+0/-10)
.pc/9002-better_default_uri_virsh.patch/tools/Makefile.am (+163/-0)
.pc/9002-better_default_uri_virsh.patch/tools/Makefile.in (+1662/-0)
.pc/9002-better_default_uri_virsh.patch/tools/virsh.c (+11651/-0)
.pc/9003-better-default-arch.patch/src/conf/capabilities.c (+0/-837)
.pc/9004-libvirtd-group-name.patch/daemon/libvirtd.conf (+346/-0)
.pc/9005-increase-unix-socket-timeout.patch/src/qemu/qemu_monitor.c (+0/-1931)
.pc/9006-default-config-test-case.patch/tests/daemon-conf (+112/-0)
.pc/9007-fix-daemon-conf-ftbfs.patch/tests/daemon-conf (+112/-0)
.pc/9009-autodetect-nc-params.patch/src/remote/remote_driver.c (+10700/-0)
.pc/9010-dont-disable-ipv6.patch/src/network/bridge_driver.c (+1903/-0)
.pc/9011-move-ebtables-script.patch/src/nwfilter/nwfilter_ebiptables_driver.c (+3739/-0)
.pc/9014-skip-nodeinfotest.patch/tests/nodeinfotest.c (+127/-0)
.pc/9020-lp545795.patch/src/util/pci.c (+1493/-0)
.pc/9021-fix-uint64_t.patch/src/lxc/lxc_container.c (+904/-0)
.pc/applied-patches (+18/-0)
.x-sc_prohibit_empty_lines_at_EOF (+1/-0)
daemon/libvirtd.stp (+65/-0)
daemon/probes.d (+12/-0)
debian/patches/9021-fix-uint64_t.patch (+17/-0)
docs/api_extension/0001-add-to-xml.patch (+145/-0)
docs/api_extension/0002-add-new-public-API.patch (+62/-0)
docs/api_extension/0003-define-internal-driver-API.patch (+222/-0)
docs/api_extension/0004-implement-the-public-APIs.patch (+188/-0)
docs/api_extension/0005-implement-the-remote-protocol.patch (+421/-0)
docs/api_extension/0006-make-old-API-trivially-wrap-to-new-API.patch (+735/-0)
docs/api_extension/0007-add-virsh-support.patch (+388/-0)
docs/api_extension/0008-support-new-xml.patch (+519/-0)
docs/api_extension/0009-support-all-flags-in-test-driver.patch (+197/-0)
docs/api_extension/0010-improve-vcpu-support-in-qemu-command-line.patch (+122/-0)
docs/api_extension/0011-complete-vcpu-support-in-qemu-driver.patch (+169/-0)
docs/api_extension/0012-improve-vcpu-support-in-xen-command-line.patch (+294/-0)
docs/api_extension/0013-improve-getting-xen-vcpu-counts.patch (+216/-0)
docs/api_extension/0014-improve-setting-xen-vcpu-counts.patch (+342/-0)
docs/api_extension/0015-remove-dead-xen-code.patch (+228/-0)
docs/csharp.html (+251/-0)
docs/csharp.html.in (+498/-0)
docs/devguide.html (+150/-0)
docs/devguide.html.in (+55/-0)
docs/generic.css (+75/-0)
docs/libvirt.css (+357/-0)
docs/main.css (+2/-0)
docs/todo.cfg-example (+26/-0)
docs/todo.html (+83/-0)
docs/todo.html.in (+1/-0)
docs/todo.pl (+120/-0)
examples/systemtap/Makefile.am (+2/-0)
examples/systemtap/Makefile.in (+1195/-0)
examples/systemtap/client.stp (+28/-0)
gnulib/lib/md5.c (+464/-0)
gnulib/lib/md5.h (+128/-0)
gnulib/lib/termios.in.h (+67/-0)
gnulib/m4/func.m4 (+22/-0)
gnulib/m4/getpagesize.m4 (+35/-0)
gnulib/m4/md5.m4 (+18/-0)
gnulib/m4/poll_h.m4 (+57/-0)
gnulib/m4/socketlib.m4 (+88/-0)
gnulib/m4/termios_h.m4 (+44/-0)
gnulib/tests/getpagesize.c (+41/-0)
gnulib/tests/test-func.c (+42/-0)
gnulib/tests/test-md5.c (+69/-0)
gnulib/tests/test-poll-h.c (+36/-0)
gnulib/tests/test-strnlen.c (+70/-0)
gnulib/tests/test-sys_wait.h (+55/-0)
gnulib/tests/test-termios.c (+29/-0)
src/util/files.c (+46/-0)
src/util/files.h (+46/-0)
src/util/virtaudit.c (+151/-0)
src/util/virtaudit.h (+60/-0)
tests/nwfilterxml2xmlin/comment-test.xml (+71/-0)
tests/nwfilterxml2xmlin/example-1.xml (+24/-0)
tests/nwfilterxml2xmlin/example-2.xml (+37/-0)
tests/nwfilterxml2xmlout/comment-test.xml (+30/-0)
tests/nwfilterxml2xmlout/example-1.xml (+15/-0)
tests/nwfilterxml2xmlout/example-2.xml (+21/-0)
tests/qemuhelpdata/kvm-83-rhel56 (+141/-0)
tests/qemuxml2argvdata/qemuxml2argv-disk-drive-readonly-no-device.args (+1/-0)
tests/qemuxml2argvdata/qemuxml2argv-disk-drive-readonly-no-device.xml (+31/-0)
tests/qemuxml2argvdata/qemuxml2argv-fs9p.args (+1/-0)
tests/qemuxml2argvdata/qemuxml2argv-fs9p.xml (+28/-0)
tests/qemuxml2argvdata/qemuxml2argv-memtune.args (+1/-0)
tests/qemuxml2argvdata/qemuxml2argv-memtune.xml (+30/-0)
tests/qemuxml2argvdata/qemuxml2argv-smp.args (+1/-0)
tests/qemuxml2argvdata/qemuxml2argv-smp.xml (+28/-0)
tests/sexpr2xmldata/sexpr2xml-boot-grub.sexpr (+1/-0)
tests/sexpr2xmldata/sexpr2xml-boot-grub.xml (+26/-0)
tests/sexpr2xmldata/sexpr2xml-disk-drv-blktap2-raw.sexpr (+1/-0)
tests/sexpr2xmldata/sexpr2xml-disk-drv-blktap2-raw.xml (+27/-0)
tests/sexpr2xmldata/sexpr2xml-pv-vcpus.sexpr (+1/-0)
tests/sexpr2xmldata/sexpr2xml-pv-vcpus.xml (+27/-0)
tests/sockettest.c (+259/-0)
tests/storagepoolxml2xmlin/pool-iscsi-vendor-product.xml (+19/-0)
tests/storagepoolxml2xmlout/pool-iscsi-vendor-product.xml (+22/-0)
tests/virbuftest.c (+86/-0)
tests/vmx2xmldata/vmx2xml-annotation.vmx (+3/-0)
tests/vmx2xmldata/vmx2xml-annotation.xml (+16/-0)
tests/vmx2xmldata/vmx2xml-esx-in-the-wild-5.vmx (+97/-0)
tests/vmx2xmldata/vmx2xml-esx-in-the-wild-5.xml (+34/-0)
tests/vmx2xmldata/vmx2xml-serial-network-client.vmx (+6/-0)
tests/vmx2xmldata/vmx2xml-serial-network-client.xml (+25/-0)
tests/vmx2xmldata/vmx2xml-serial-network-server.vmx (+6/-0)
tests/vmx2xmldata/vmx2xml-serial-network-server.xml (+25/-0)
tests/xmconfigdata/sexpr2xml-pv-bootloader.cfg (+11/-0)
tests/xmconfigdata/sexpr2xml-pv-bootloader.xml (+26/-0)
tests/xmconfigdata/test-paravirt-vcpu.cfg (+12/-0)
tests/xmconfigdata/test-paravirt-vcpu.xml (+30/-0)
tests/xml2sexprdata/xml2sexpr-boot-grub.sexpr (+1/-0)
tests/xml2sexprdata/xml2sexpr-boot-grub.xml (+21/-0)
tests/xml2sexprdata/xml2sexpr-disk-drv-blktap2-raw.sexpr (+1/-0)
tests/xml2sexprdata/xml2sexpr-disk-drv-blktap2-raw.xml (+23/-0)
tests/xml2sexprdata/xml2sexpr-disk-drv-blktap2.sexpr (+1/-0)
tests/xml2sexprdata/xml2sexpr-disk-drv-blktap2.xml (+23/-0)
tests/xml2sexprdata/xml2sexpr-pv-vcpus.sexpr (+1/-0)
tests/xml2sexprdata/xml2sexpr-pv-vcpus.xml (+22/-0)
tests/xml2vmxdata/xml2vmx-annotation.vmx (+11/-0)
tests/xml2vmxdata/xml2vmx-annotation.xml (+9/-0)
tests/xml2vmxdata/xml2vmx-serial-network-client.vmx (+15/-0)
tests/xml2vmxdata/xml2vmx-serial-network-client.xml (+15/-0)
tests/xml2vmxdata/xml2vmx-serial-network-server.vmx (+15/-0)
tests/xml2vmxdata/xml2vmx-serial-network-server.xml (+15/-0)
tools/libvirt-guests.init.in (+341/-0)
tools/libvirt-guests.sysconf (+24/-0)
Conflict: can't delete .pc/0001-remove-RHism.diff.patch.moved because it is not empty.  Not deleting.
Conflict adding file .pc/0001-remove-RHism.diff.patch.  Moved existing file to .pc/0001-remove-RHism.diff.patch.moved.
Conflict because .pc/0001-remove-RHism.diff.patch.moved is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/0001-remove-RHism.diff.patch.moved/tools because it is not empty.  Not deleting.
Conflict because .pc/0001-remove-RHism.diff.patch.moved/tools is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/0001-remove-RHism.diff.patch.moved/tools/virsh.pod
Conflict: can't delete .pc/0003-allow-libvirt-group-to-access-the-socket.patch.moved because it is not empty.  Not deleting.
Conflict adding file .pc/0003-allow-libvirt-group-to-access-the-socket.patch.  Moved existing file to .pc/0003-allow-libvirt-group-to-access-the-socket.patch.moved.
Conflict because .pc/0003-allow-libvirt-group-to-access-the-socket.patch.moved is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/0003-allow-libvirt-group-to-access-the-socket.patch.moved/daemon because it is not empty.  Not deleting.
Conflict because .pc/0003-allow-libvirt-group-to-access-the-socket.patch.moved/daemon is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/0003-allow-libvirt-group-to-access-the-socket.patch.moved/daemon/libvirtd.conf
Conflict: can't delete .pc/0004-fix-Debian-specific-path-to-hvm-loader.patch.moved because it is not empty.  Not deleting.
Conflict adding file .pc/0004-fix-Debian-specific-path-to-hvm-loader.patch.  Moved existing file to .pc/0004-fix-Debian-specific-path-to-hvm-loader.patch.moved.
Conflict because .pc/0004-fix-Debian-specific-path-to-hvm-loader.patch.moved is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/0004-fix-Debian-specific-path-to-hvm-loader.patch.moved/src because it is not empty.  Not deleting.
Conflict because .pc/0004-fix-Debian-specific-path-to-hvm-loader.patch.moved/src is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/0004-fix-Debian-specific-path-to-hvm-loader.patch.moved/src/xen because it is not empty.  Not deleting.
Conflict because .pc/0004-fix-Debian-specific-path-to-hvm-loader.patch.moved/src/xen is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/0004-fix-Debian-specific-path-to-hvm-loader.patch.moved/src/xen/xen_hypervisor.c
Conflict: can't delete .pc/0006-patch-qemuMonitorTextGetMigrationStatus-to-intercept.patch.moved because it is not empty.  Not deleting.
Conflict adding file .pc/0006-patch-qemuMonitorTextGetMigrationStatus-to-intercept.patch.  Moved existing file to .pc/0006-patch-qemuMonitorTextGetMigrationStatus-to-intercept.patch.moved.
Conflict because .pc/0006-patch-qemuMonitorTextGetMigrationStatus-to-intercept.patch.moved is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/0006-patch-qemuMonitorTextGetMigrationStatus-to-intercept.patch.moved/src because it is not empty.  Not deleting.
Conflict because .pc/0006-patch-qemuMonitorTextGetMigrationStatus-to-intercept.patch.moved/src is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/0006-patch-qemuMonitorTextGetMigrationStatus-to-intercept.patch.moved/src/qemu because it is not empty.  Not deleting.
Conflict because .pc/0006-patch-qemuMonitorTextGetMigrationStatus-to-intercept.patch.moved/src/qemu is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/0006-patch-qemuMonitorTextGetMigrationStatus-to-intercept.patch.moved/src/qemu/qemu_monitor_text.c
Conflict: can't delete .pc/9000-delayed_iff_up_bridge.patch.moved because it is not empty.  Not deleting.
Conflict adding file .pc/9000-delayed_iff_up_bridge.patch.  Moved existing file to .pc/9000-delayed_iff_up_bridge.patch.moved.
Conflict because .pc/9000-delayed_iff_up_bridge.patch.moved is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/9000-delayed_iff_up_bridge.patch.moved/src because it is not empty.  Not deleting.
Conflict because .pc/9000-delayed_iff_up_bridge.patch.moved/src is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/9000-delayed_iff_up_bridge.patch.moved/src/util because it is not empty.  Not deleting.
Conflict because .pc/9000-delayed_iff_up_bridge.patch.moved/src/util is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/9000-delayed_iff_up_bridge.patch.moved/src/util/bridge.c
Conflict: can't delete .pc/9002-better_default_uri_virsh.patch.moved because it is not empty.  Not deleting.
Conflict adding file .pc/9002-better_default_uri_virsh.patch.  Moved existing file to .pc/9002-better_default_uri_virsh.patch.moved.
Conflict because .pc/9002-better_default_uri_virsh.patch.moved is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/9002-better_default_uri_virsh.patch.moved/tools because it is not empty.  Not deleting.
Conflict because .pc/9002-better_default_uri_virsh.patch.moved/tools is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/9002-better_default_uri_virsh.patch.moved/tools/Makefile.am
Contents conflict in .pc/9002-better_default_uri_virsh.patch.moved/tools/Makefile.in
Contents conflict in .pc/9002-better_default_uri_virsh.patch.moved/tools/virsh.c
Conflict: can't delete .pc/9004-libvirtd-group-name.patch.moved because it is not empty.  Not deleting.
Conflict adding file .pc/9004-libvirtd-group-name.patch.  Moved existing file to .pc/9004-libvirtd-group-name.patch.moved.
Conflict because .pc/9004-libvirtd-group-name.patch.moved is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/9004-libvirtd-group-name.patch.moved/daemon because it is not empty.  Not deleting.
Conflict because .pc/9004-libvirtd-group-name.patch.moved/daemon is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/9004-libvirtd-group-name.patch.moved/daemon/libvirtd.conf
Conflict: can't delete .pc/9006-default-config-test-case.patch.moved because it is not empty.  Not deleting.
Conflict adding file .pc/9006-default-config-test-case.patch.  Moved existing file to .pc/9006-default-config-test-case.patch.moved.
Conflict because .pc/9006-default-config-test-case.patch.moved is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/9006-default-config-test-case.patch.moved/tests because it is not empty.  Not deleting.
Conflict because .pc/9006-default-config-test-case.patch.moved/tests is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/9006-default-config-test-case.patch.moved/tests/daemon-conf
Conflict: can't delete .pc/9007-fix-daemon-conf-ftbfs.patch.moved because it is not empty.  Not deleting.
Conflict adding file .pc/9007-fix-daemon-conf-ftbfs.patch.  Moved existing file to .pc/9007-fix-daemon-conf-ftbfs.patch.moved.
Conflict because .pc/9007-fix-daemon-conf-ftbfs.patch.moved is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/9007-fix-daemon-conf-ftbfs.patch.moved/tests because it is not empty.  Not deleting.
Conflict because .pc/9007-fix-daemon-conf-ftbfs.patch.moved/tests is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/9007-fix-daemon-conf-ftbfs.patch.moved/tests/daemon-conf
Conflict: can't delete .pc/9009-autodetect-nc-params.patch.moved because it is not empty.  Not deleting.
Conflict adding file .pc/9009-autodetect-nc-params.patch.  Moved existing file to .pc/9009-autodetect-nc-params.patch.moved.
Conflict because .pc/9009-autodetect-nc-params.patch.moved is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/9009-autodetect-nc-params.patch.moved/src because it is not empty.  Not deleting.
Conflict because .pc/9009-autodetect-nc-params.patch.moved/src is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/9009-autodetect-nc-params.patch.moved/src/remote because it is not empty.  Not deleting.
Conflict because .pc/9009-autodetect-nc-params.patch.moved/src/remote is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/9009-autodetect-nc-params.patch.moved/src/remote/remote_driver.c
Conflict: can't delete .pc/9010-dont-disable-ipv6.patch.moved because it is not empty.  Not deleting.
Conflict adding file .pc/9010-dont-disable-ipv6.patch.  Moved existing file to .pc/9010-dont-disable-ipv6.patch.moved.
Conflict because .pc/9010-dont-disable-ipv6.patch.moved is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/9010-dont-disable-ipv6.patch.moved/src because it is not empty.  Not deleting.
Conflict because .pc/9010-dont-disable-ipv6.patch.moved/src is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/9010-dont-disable-ipv6.patch.moved/src/network because it is not empty.  Not deleting.
Conflict because .pc/9010-dont-disable-ipv6.patch.moved/src/network is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/9010-dont-disable-ipv6.patch.moved/src/network/bridge_driver.c
Conflict: can't delete .pc/9011-move-ebtables-script.patch.moved because it is not empty.  Not deleting.
Conflict adding file .pc/9011-move-ebtables-script.patch.  Moved existing file to .pc/9011-move-ebtables-script.patch.moved.
Conflict because .pc/9011-move-ebtables-script.patch.moved is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/9011-move-ebtables-script.patch.moved/src because it is not empty.  Not deleting.
Conflict because .pc/9011-move-ebtables-script.patch.moved/src is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/9011-move-ebtables-script.patch.moved/src/nwfilter because it is not empty.  Not deleting.
Conflict because .pc/9011-move-ebtables-script.patch.moved/src/nwfilter is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/9011-move-ebtables-script.patch.moved/src/nwfilter/nwfilter_ebiptables_driver.c
Conflict: can't delete .pc/9014-skip-nodeinfotest.patch.moved because it is not empty.  Not deleting.
Conflict adding file .pc/9014-skip-nodeinfotest.patch.  Moved existing file to .pc/9014-skip-nodeinfotest.patch.moved.
Conflict because .pc/9014-skip-nodeinfotest.patch.moved is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/9014-skip-nodeinfotest.patch.moved/tests because it is not empty.  Not deleting.
Conflict because .pc/9014-skip-nodeinfotest.patch.moved/tests is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/9014-skip-nodeinfotest.patch.moved/tests/nodeinfotest.c
Conflict: can't delete .pc/9020-lp545795.patch.moved because it is not empty.  Not deleting.
Conflict adding file .pc/9020-lp545795.patch.  Moved existing file to .pc/9020-lp545795.patch.moved.
Conflict because .pc/9020-lp545795.patch.moved is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/9020-lp545795.patch.moved/src because it is not empty.  Not deleting.
Conflict because .pc/9020-lp545795.patch.moved/src is not versioned, but has versioned children.  Versioned directory.
Conflict: can't delete .pc/9020-lp545795.patch.moved/src/util because it is not empty.  Not deleting.
Conflict because .pc/9020-lp545795.patch.moved/src/util is not versioned, but has versioned children.  Versioned directory.
Contents conflict in .pc/9020-lp545795.patch.moved/src/util/pci.c
Conflict adding file .pc/9021-fix-uint64_t.patch.  Moved existing file to .pc/9021-fix-uint64_t.patch.moved.
Contents conflict in .pc/applied-patches
Conflict adding file .x-sc_prohibit_empty_lines_at_EOF.  Moved existing file to .x-sc_prohibit_empty_lines_at_EOF.moved.
Conflict adding file daemon/libvirtd.stp.  Moved existing file to daemon/libvirtd.stp.moved.
Conflict adding file daemon/probes.d.  Moved existing file to daemon/probes.d.moved.
Conflict adding file debian/patches/9021-fix-uint64_t.patch.  Moved existing file to debian/patches/9021-fix-uint64_t.patch.moved.
Conflict adding file docs/api_extension/0001-add-to-xml.patch.  Moved existing file to docs/api_extension/0001-add-to-xml.patch.moved.
Conflict adding file docs/api_extension/0002-add-new-public-API.patch.  Moved existing file to docs/api_extension/0002-add-new-public-API.patch.moved.
Conflict adding file docs/api_extension/0003-define-internal-driver-API.patch.  Moved existing file to docs/api_extension/0003-define-internal-driver-API.patch.moved.
Conflict adding file docs/api_extension/0004-implement-the-public-APIs.patch.  Moved existing file to docs/api_extension/0004-implement-the-public-APIs.patch.moved.
Conflict adding file docs/api_extension/0005-implement-the-remote-protocol.patch.  Moved existing file to docs/api_extension/0005-implement-the-remote-protocol.patch.moved.
Conflict adding file docs/api_extension/0006-make-old-API-trivially-wrap-to-new-API.patch.  Moved existing file to docs/api_extension/0006-make-old-API-trivially-wrap-to-new-API.patch.moved.
Conflict adding file docs/api_extension/0007-add-virsh-support.patch.  Moved existing file to docs/api_extension/0007-add-virsh-support.patch.moved.
Conflict adding file docs/api_extension/0008-support-new-xml.patch.  Moved existing file to docs/api_extension/0008-support-new-xml.patch.moved.
Conflict adding file docs/api_extension/0009-support-all-flags-in-test-driver.patch.  Moved existing file to docs/api_extension/0009-support-all-flags-in-test-driver.patch.moved.
Conflict adding file docs/api_extension/0010-improve-vcpu-support-in-qemu-command-line.patch.  Moved existing file to docs/api_extension/0010-improve-vcpu-support-in-qemu-command-line.patch.moved.
Conflict adding file docs/api_extension/0011-complete-vcpu-support-in-qemu-driver.patch.  Moved existing file to docs/api_extension/0011-complete-vcpu-support-in-qemu-driver.patch.moved.
Conflict adding file docs/api_extension/0012-improve-vcpu-support-in-xen-command-line.patch.  Moved existing file to docs/api_extension/0012-improve-vcpu-support-in-xen-command-line.patch.moved.
Conflict adding file docs/api_extension/0013-improve-getting-xen-vcpu-counts.patch.  Moved existing file to docs/api_extension/0013-improve-getting-xen-vcpu-counts.patch.moved.
Conflict adding file docs/api_extension/0014-improve-setting-xen-vcpu-counts.patch.  Moved existing file to docs/api_extension/0014-improve-setting-xen-vcpu-counts.patch.moved.
Conflict adding file docs/api_extension/0015-remove-dead-xen-code.patch.  Moved existing file to docs/api_extension/0015-remove-dead-xen-code.patch.moved.
Conflict adding file docs/csharp.html.in.  Moved existing file to docs/csharp.html.in.moved.
Conflict adding file docs/csharp.html.  Moved existing file to docs/csharp.html.moved.
Conflict adding file docs/devguide.html.in.  Moved existing file to docs/devguide.html.in.moved.
Conflict adding file docs/devguide.html.  Moved existing file to docs/devguide.html.moved.
Conflict adding file docs/generic.css.  Moved existing file to docs/generic.css.moved.
Conflict adding file docs/libvirt.css.  Moved existing file to docs/libvirt.css.moved.
Conflict adding file docs/main.css.  Moved existing file to docs/main.css.moved.
Conflict adding file docs/todo.cfg-example.  Moved existing file to docs/todo.cfg-example.moved.
Conflict adding file docs/todo.html.in.  Moved existing file to docs/todo.html.in.moved.
Conflict adding file docs/todo.html.  Moved existing file to docs/todo.html.moved.
Conflict adding file docs/todo.pl.  Moved existing file to docs/todo.pl.moved.
Conflict adding file examples/systemtap.  Moved existing file to examples/systemtap.moved.
Conflict adding file gnulib/lib/md5.c.  Moved existing file to gnulib/lib/md5.c.moved.
Conflict adding file gnulib/lib/md5.h.  Moved existing file to gnulib/lib/md5.h.moved.
Conflict adding file gnulib/lib/termios.in.h.  Moved existing file to gnulib/lib/termios.in.h.moved.
Conflict adding file gnulib/m4/func.m4.  Moved existing file to gnulib/m4/func.m4.moved.
Conflict adding file gnulib/m4/getpagesize.m4.  Moved existing file to gnulib/m4/getpagesize.m4.moved.
Conflict adding file gnulib/m4/md5.m4.  Moved existing file to gnulib/m4/md5.m4.moved.
Conflict adding file gnulib/m4/poll_h.m4.  Moved existing file to gnulib/m4/poll_h.m4.moved.
Conflict adding file gnulib/m4/socketlib.m4.  Moved existing file to gnulib/m4/socketlib.m4.moved.
Conflict adding file gnulib/m4/termios_h.m4.  Moved existing file to gnulib/m4/termios_h.m4.moved.
Conflict adding file gnulib/tests/getpagesize.c.  Moved existing file to gnulib/tests/getpagesize.c.moved.
Conflict adding file gnulib/tests/test-func.c.  Moved existing file to gnulib/tests/test-func.c.moved.
Conflict adding file gnulib/tests/test-md5.c.  Moved existing file to gnulib/tests/test-md5.c.moved.
Conflict adding file gnulib/tests/test-poll-h.c.  Moved existing file to gnulib/tests/test-poll-h.c.moved.
Conflict adding file gnulib/tests/test-strnlen.c.  Moved existing file to gnulib/tests/test-strnlen.c.moved.
Conflict adding file gnulib/tests/test-sys_wait.h.  Moved existing file to gnulib/tests/test-sys_wait.h.moved.
Conflict adding file gnulib/tests/test-termios.c.  Moved existing file to gnulib/tests/test-termios.c.moved.
Conflict adding file src/util/files.c.  Moved existing file to src/util/files.c.moved.
Conflict adding file src/util/files.h.  Moved existing file to src/util/files.h.moved.
Conflict adding file src/util/virtaudit.c.  Moved existing file to src/util/virtaudit.c.moved.
Conflict adding file src/util/virtaudit.h.  Moved existing file to src/util/virtaudit.h.moved.
Conflict adding file tests/nwfilterxml2xmlin/comment-test.xml.  Moved existing file to tests/nwfilterxml2xmlin/comment-test.xml.moved.
Conflict adding file tests/nwfilterxml2xmlin/example-1.xml.  Moved existing file to tests/nwfilterxml2xmlin/example-1.xml.moved.
Conflict adding file tests/nwfilterxml2xmlin/example-2.xml.  Moved existing file to tests/nwfilterxml2xmlin/example-2.xml.moved.
Conflict adding file tests/nwfilterxml2xmlout/comment-test.xml.  Moved existing file to tests/nwfilterxml2xmlout/comment-test.xml.moved.
Conflict adding file tests/nwfilterxml2xmlout/example-1.xml.  Moved existing file to tests/nwfilterxml2xmlout/example-1.xml.moved.
Conflict adding file tests/nwfilterxml2xmlout/example-2.xml.  Moved existing file to tests/nwfilterxml2xmlout/example-2.xml.moved.
Conflict adding file tests/qemuhelpdata/kvm-83-rhel56.  Moved existing file to tests/qemuhelpdata/kvm-83-rhel56.moved.
Conflict adding file tests/qemuxml2argvdata/qemuxml2argv-disk-drive-readonly-no-device.args.  Moved existing file to tests/qemuxml2argvdata/qemuxml2argv-disk-drive-readonly-no-device.args.moved.
Conflict adding file tests/qemuxml2argvdata/qemuxml2argv-disk-drive-readonly-no-device.xml.  Moved existing file to tests/qemuxml2argvdata/qemuxml2argv-disk-drive-readonly-no-device.xml.moved.
Conflict adding file tests/qemuxml2argvdata/qemuxml2argv-fs9p.args.  Moved existing file to tests/qemuxml2argvdata/qemuxml2argv-fs9p.args.moved.
Conflict adding file tests/qemuxml2argvdata/qemuxml2argv-fs9p.xml.  Moved existing file to tests/qemuxml2argvdata/qemuxml2argv-fs9p.xml.moved.
Conflict adding file tests/qemuxml2argvdata/qemuxml2argv-memtune.args.  Moved existing file to tests/qemuxml2argvdata/qemuxml2argv-memtune.args.moved.
Conflict adding file tests/qemuxml2argvdata/qemuxml2argv-memtune.xml.  Moved existing file to tests/qemuxml2argvdata/qemuxml2argv-memtune.xml.moved.
Conflict adding file tests/qemuxml2argvdata/qemuxml2argv-smp.args.  Moved existing file to tests/qemuxml2argvdata/qemuxml2argv-smp.args.moved.
Conflict adding file tests/qemuxml2argvdata/qemuxml2argv-smp.xml.  Moved existing file to tests/qemuxml2argvdata/qemuxml2argv-smp.xml.moved.
Conflict adding file tests/sexpr2xmldata/sexpr2xml-boot-grub.sexpr.  Moved existing file to tests/sexpr2xmldata/sexpr2xml-boot-grub.sexpr.moved.
Conflict adding file tests/sexpr2xmldata/sexpr2xml-boot-grub.xml.  Moved existing file to tests/sexpr2xmldata/sexpr2xml-boot-grub.xml.moved.
Conflict adding file tests/sexpr2xmldata/sexpr2xml-disk-drv-blktap2-raw.sexpr.  Moved existing file to tests/sexpr2xmldata/sexpr2xml-disk-drv-blktap2-raw.sexpr.moved.
Conflict adding file tests/sexpr2xmldata/sexpr2xml-disk-drv-blktap2-raw.xml.  Moved existing file to tests/sexpr2xmldata/sexpr2xml-disk-drv-blktap2-raw.xml.moved.
Conflict adding file tests/sexpr2xmldata/sexpr2xml-pv-vcpus.sexpr.  Moved existing file to tests/sexpr2xmldata/sexpr2xml-pv-vcpus.sexpr.moved.
Conflict adding file tests/sexpr2xmldata/sexpr2xml-pv-vcpus.xml.  Moved existing file to tests/sexpr2xmldata/sexpr2xml-pv-vcpus.xml.moved.
Conflict adding file tests/sockettest.c.  Moved existing file to tests/sockettest.c.moved.
Conflict adding file tests/storagepoolxml2xmlin/pool-iscsi-vendor-product.xml.  Moved existing file to tests/storagepoolxml2xmlin/pool-iscsi-vendor-product.xml.moved.
Conflict adding file tests/storagepoolxml2xmlout/pool-iscsi-vendor-product.xml.  Moved existing file to tests/storagepoolxml2xmlout/pool-iscsi-vendor-product.xml.moved.
Conflict adding file tests/virbuftest.c.  Moved existing file to tests/virbuftest.c.moved.
Conflict adding file tests/vmx2xmldata/vmx2xml-annotation.vmx.  Moved existing file to tests/vmx2xmldata/vmx2xml-annotation.vmx.moved.
Conflict adding file tests/vmx2xmldata/vmx2xml-annotation.xml.  Moved existing file to tests/vmx2xmldata/vmx2xml-annotation.xml.moved.
Conflict adding file tests/vmx2xmldata/vmx2xml-esx-in-the-wild-5.vmx.  Moved existing file to tests/vmx2xmldata/vmx2xml-esx-in-the-wild-5.vmx.moved.
Conflict adding file tests/vmx2xmldata/vmx2xml-esx-in-the-wild-5.xml.  Moved existing file to tests/vmx2xmldata/vmx2xml-esx-in-the-wild-5.xml.moved.
Conflict adding file tests/vmx2xmldata/vmx2xml-serial-network-client.vmx.  Moved existing file to tests/vmx2xmldata/vmx2xml-serial-network-client.vmx.moved.
Conflict adding file tests/vmx2xmldata/vmx2xml-serial-network-client.xml.  Moved existing file to tests/vmx2xmldata/vmx2xml-serial-network-client.xml.moved.
Conflict adding file tests/vmx2xmldata/vmx2xml-serial-network-server.vmx.  Moved existing file to tests/vmx2xmldata/vmx2xml-serial-network-server.vmx.moved.
Conflict adding file tests/vmx2xmldata/vmx2xml-serial-network-server.xml.  Moved existing file to tests/vmx2xmldata/vmx2xml-serial-network-server.xml.moved.
Conflict adding file tests/xmconfigdata/sexpr2xml-pv-bootloader.cfg.  Moved existing file to tests/xmconfigdata/sexpr2xml-pv-bootloader.cfg.moved.
Conflict adding file tests/xmconfigdata/sexpr2xml-pv-bootloader.xml.  Moved existing file to tests/xmconfigdata/sexpr2xml-pv-bootloader.xml.moved.
Conflict adding file tests/xmconfigdata/test-paravirt-vcpu.cfg.  Moved existing file to tests/xmconfigdata/test-paravirt-vcpu.cfg.moved.
Conflict adding file tests/xmconfigdata/test-paravirt-vcpu.xml.  Moved existing file to tests/xmconfigdata/test-paravirt-vcpu.xml.moved.
Conflict adding file tests/xml2sexprdata/xml2sexpr-boot-grub.sexpr.  Moved existing file to tests/xml2sexprdata/xml2sexpr-boot-grub.sexpr.moved.
Conflict adding file tests/xml2sexprdata/xml2sexpr-boot-grub.xml.  Moved existing file to tests/xml2sexprdata/xml2sexpr-boot-grub.xml.moved.
Conflict adding file tests/xml2sexprdata/xml2sexpr-disk-drv-blktap2-raw.sexpr.  Moved existing file to tests/xml2sexprdata/xml2sexpr-disk-drv-blktap2-raw.sexpr.moved.
Conflict adding file tests/xml2sexprdata/xml2sexpr-disk-drv-blktap2-raw.xml.  Moved existing file to tests/xml2sexprdata/xml2sexpr-disk-drv-blktap2-raw.xml.moved.
Conflict adding file tests/xml2sexprdata/xml2sexpr-disk-drv-blktap2.sexpr.  Moved existing file to tests/xml2sexprdata/xml2sexpr-disk-drv-blktap2.sexpr.moved.
Conflict adding file tests/xml2sexprdata/xml2sexpr-disk-drv-blktap2.xml.  Moved existing file to tests/xml2sexprdata/xml2sexpr-disk-drv-blktap2.xml.moved.
Conflict adding file tests/xml2sexprdata/xml2sexpr-pv-vcpus.sexpr.  Moved existing file to tests/xml2sexprdata/xml2sexpr-pv-vcpus.sexpr.moved.
Conflict adding file tests/xml2sexprdata/xml2sexpr-pv-vcpus.xml.  Moved existing file to tests/xml2sexprdata/xml2sexpr-pv-vcpus.xml.moved.
Conflict adding file tests/xml2vmxdata/xml2vmx-annotation.vmx.  Moved existing file to tests/xml2vmxdata/xml2vmx-annotation.vmx.moved.
Conflict adding file tests/xml2vmxdata/xml2vmx-annotation.xml.  Moved existing file to tests/xml2vmxdata/xml2vmx-annotation.xml.moved.
Conflict adding file tests/xml2vmxdata/xml2vmx-serial-network-client.vmx.  Moved existing file to tests/xml2vmxdata/xml2vmx-serial-network-client.vmx.moved.
Conflict adding file tests/xml2vmxdata/xml2vmx-serial-network-client.xml.  Moved existing file to tests/xml2vmxdata/xml2vmx-serial-network-client.xml.moved.
Conflict adding file tests/xml2vmxdata/xml2vmx-serial-network-server.vmx.  Moved existing file to tests/xml2vmxdata/xml2vmx-serial-network-server.vmx.moved.
Conflict adding file tests/xml2vmxdata/xml2vmx-serial-network-server.xml.  Moved existing file to tests/xml2vmxdata/xml2vmx-serial-network-server.xml.moved.
Conflict adding file tools/libvirt-guests.init.in.  Moved existing file to tools/libvirt-guests.init.in.moved.
Conflict adding file tools/libvirt-guests.sysconf.  Moved existing file to tools/libvirt-guests.sysconf.moved.
To merge this branch: bzr merge lp:~ubuntu-branches/ubuntu/natty/libvirt/natty-201011111816
Reviewer Review Type Date Requested Status
Ubuntu branches Pending
Review via email: mp+40652@code.launchpad.net

Description of the change

The package history in the archive and the history in the bzr branch differ. As the archive is authoritative the history of lp:ubuntu/natty/libvirt now reflects that and the old bzr branch has been pushed to lp:~ubuntu-branches/ubuntu/natty/libvirt/natty-201011111816. A merge should be performed if necessary.

To post a comment you must log in.

Unmerged revisions

108. By Jamie Strandboge

0.8.5 merge from Serge Hallyn

* New upstream release.
* Removed a slew of patches which have been
  applied upstream since 0.8.3.
  - 9012-apparmor-extra-tests.patch
  - 9013-apparmor-chardev.patch
  - 9015-Add-ubd-to-the-list-of-disk-prefixes.patch
  - 9016-Close-fd-s-of-persistent-tap-devices.patch
  - 9017-Make-sure-all-command-line-arguments-get-passed-to-U.patch
  - 9018-Make-umlConnectTapDevice-ask-brAddTap-for-a-persiste.patch
  - 9019-uml-fix-logic-bug-in-checking-reply-length.patch
  - 9021-Allow-chardev-of-type-file-for-UML-domains.patch
  - 9022-Rename-qemudShrinkDisks-to-virDomainDiskRemove-and-m.patch
  - 9023-Support-virDomainAttachDevice-and-virDomainDetachDev.patch
  - 9024-Explicitly-pass-uml_dir-argument-to-user-mode-linux.patch
  - 9025-Add-nwfilter-support-to-UML-driver.patch
  - 9026-Rebuild-network-filter-for-UML-guests-on-updates.patch
  - 9027-Make-newfilter-xml-transformations-endian-safe.patch
  - 9028-lp628055.patch
* Updated 9002-better_default_uri_virsh.patch to use vshStrdup,
  as now required in that file. (use of strdup now causes compilation
  to fail)
* Removed 9008-run-as-root-by-default.patch, which has not been
  applied for a while now, with no ill effects.
* Simple refresh of:
  - 0001-remove-RHism.diff.patch
  - 0003-allow-libvirt-group-to-access-the-socket.patch
  - 0004-fix-Debian-specific-path-to-hvm-loader.patch
  - 0006-patch-qemuMonitorTextGetMigrationStatus-to-intercept.patch
  - 9000-delayed_iff_up_bridge.patch
  - 9001-dont_clobber_existing_bridges.patch
  - 9003-better-default-arch.patch
  - 9004-libvirtd-group-name.patch
  - 9005-increase-unix-socket-timeout.patch
  - 9006-default-config-test-case.patch
  - 9009-autodetect-nc-params.patch
  - 9010-dont-disable-ipv6.patch
  - 9011-move-ebtables-script.patch
  - 9014-skip-nodeinfotest.patch
  - 9020-lp545795.patch
* Create a patch to include stdint.h so lxc_container.h, which
  #includes linux/fs.h, doesn't trip up on undefined uint64_t.

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== added file '.pc/.quilt_patches'
2--- .pc/.quilt_patches 1970-01-01 00:00:00 +0000
3+++ .pc/.quilt_patches 2010-11-11 18:22:47 +0000
4@@ -0,0 +1,1 @@
5+debian/patches
6
7=== added file '.pc/.quilt_series'
8--- .pc/.quilt_series 1970-01-01 00:00:00 +0000
9+++ .pc/.quilt_series 2010-11-11 18:22:47 +0000
10@@ -0,0 +1,1 @@
11+series
12
13=== added directory '.pc/0001-remove-RHism.diff.patch'
14=== renamed directory '.pc/0001-remove-RHism.diff.patch' => '.pc/0001-remove-RHism.diff.patch.moved'
15=== renamed file '.pc/0001-remove-RHism.diff.patch/tools/virsh.pod' => '.pc/0001-remove-RHism.diff.patch.moved/tools/virsh.pod.THIS'
16=== added file '.pc/0001-remove-RHism.diff.patch/.timestamp'
17=== added directory '.pc/0001-remove-RHism.diff.patch/tools'
18=== added file '.pc/0001-remove-RHism.diff.patch/tools/virsh.pod'
19--- .pc/0001-remove-RHism.diff.patch/tools/virsh.pod 1970-01-01 00:00:00 +0000
20+++ .pc/0001-remove-RHism.diff.patch/tools/virsh.pod 2010-11-11 18:22:47 +0000
21@@ -0,0 +1,1220 @@
22+=head1 NAME
23+
24+virsh - management user interface
25+
26+=head1 SYNOPSIS
27+
28+B<virsh> [I<OPTION>]... [I<COMMAND_STRING>]
29+
30+B<virsh> [I<OPTION>]... I<COMMAND> [I<ARG>]...
31+
32+=head1 DESCRIPTION
33+
34+The B<virsh> program is the main interface for managing virsh guest
35+domains. The program can be used to create, pause, and shutdown
36+domains. It can also be used to list current domains. Libvirt is a C
37+toolkit to interact with the virtualization capabilities of recent
38+versions of Linux (and other OSes). It is free software available
39+under the GNU Lesser General Public License. Virtualization of the
40+Linux Operating System means the ability to run multiple instances of
41+Operating Systems concurrently on a single hardware system where the
42+basic resources are driven by a Linux instance. The library aims at
43+providing a long term stable C API. It currently supports Xen, QEmu,
44+KVM, LXC, OpenVZ, VirtualBox, OpenNebula, and VMware ESX.
45+
46+The basic structure of most virsh usage is:
47+
48+ virsh [OPTION]... <command> <domain-id> [ARG]...
49+
50+Where I<command> is one of the commands listed below, I<domain-id>
51+is the numeric domain id, or the domain name (which will be internally
52+translated to domain id), and I<ARGS> are command specific
53+options. There are a few exceptions to this rule in the cases where
54+the command in question acts on all domains, the entire machine,
55+or directly on the xen hypervisor. Those exceptions will be clear for
56+each of those commands.
57+
58+The B<virsh> program can be used either to run one I<COMMAND> by giving the
59+command and its arguments on the shell command line, or a I<COMMAND_STRING>
60+which is a single shell argument consisting of multiple I<COMMAND> actions
61+and their arguments joined with whitespace, and separated by semicolons
62+between commands. Within I<COMMAND_STRING>, virsh understands the
63+same single, double, and backslash escapes as the shell, although you must
64+add another layer of shell escaping in creating the single shell argument.
65+If no command is given in the command line, B<virsh> will then start a minimal
66+interpreter waiting for your commands, and the B<quit> command will then exit
67+the program.
68+
69+The B<virsh> program understands the following I<OPTIONS>.
70+
71+=over 4
72+
73+=item B<-h>, B<--help>
74+
75+Ignore all other arguments, and behave as if the B<help> command were
76+given instead.
77+
78+=item B<-v>, B<--version>
79+
80+Ignore all other arguments, and behave as if the B<version> command were
81+given instead.
82+
83+=item B<-c>, B<--connect> I<URI>
84+
85+Connect to the specified I<URI>, as if by the B<connect> command,
86+instead of the default connection.
87+
88+=item B<-d>, B<--debug> I<LEVEL>
89+
90+Enable debug messages at integer I<LEVEL> and above. I<LEVEL> can
91+range from 0 (default) to 5.
92+
93+=item B<-l>, B<--log> I<FILE>
94+
95+Output logging details to I<FILE>.
96+
97+=item B<-q>, B<--quiet>
98+
99+Avoid extra informational messages.
100+
101+=item B<-r>, B<--readonly>
102+
103+Make the initial connection read-only, as if by the I<--readonly>
104+option of the B<connect> command.
105+
106+=item B<-t>, B<--timing>
107+
108+Output elapsed time information for each command.
109+
110+=back
111+
112+=head1 NOTES
113+
114+Most B<virsh> operations rely upon the libvirt library being able to
115+connect to an already running libvirtd service. This can usually be
116+done using the command B<service libvirtd start>.
117+
118+Most B<virsh> commands require root privileges to run due to the
119+communications channels used to talk to the hypervisor. Running as
120+non root will return an error.
121+
122+Most B<virsh> commands act synchronously, except maybe shutdown,
123+setvcpus and setmem. In those cases the fact that the B<virsh>
124+program returned, may not mean the action is complete and you
125+must poll periodically to detect that the guest completed the
126+operation.
127+
128+=head1 GENERIC COMMANDS
129+
130+The following commands are generic i.e. not specific to a domain.
131+
132+=over 4
133+
134+=item B<help> optional I<command>
135+
136+This prints a small synopsis about all commands available for B<virsh>
137+B<help> I<command> will print out a detailed help message on that command.
138+
139+=item B<quit>, B<exit>
140+
141+quit this interactive terminal
142+
143+=item B<version>
144+
145+Will print out the major version info about what this was built from.
146+
147+=over 4
148+
149+B<Example>
150+
151+B<virsh> version
152+
153+Compiled against library: libvir 0.0.6
154+
155+Using library: libvir 0.0.6
156+
157+Using API: Xen 3.0.0
158+
159+Running hypervisor: Xen 3.0.0
160+
161+=back
162+
163+=item B<cd> optional I<directory>
164+
165+Will change current directory to I<directory>. The default directory
166+for the B<cd> command is the home directory or, if there is no I<HOME>
167+variable in the environment, the root directory.
168+
169+This command is only available in interactive mode.
170+
171+=item B<pwd>
172+
173+Will print the current directory.
174+
175+=item B<connect> I<URI> optional I<--readonly>
176+
177+(Re)-Connect to the hypervisor. When the shell is first started, this
178+is automatically run with the I<URI> parameter requested by the C<-c>
179+option on the command line. The I<URI> parameter specifies how to
180+connect to the hypervisor. The documentation page at
181+L<http://libvirt.org/uri.html> lists the values supported, but the most
182+common are:
183+
184+=over 4
185+
186+=item xen:///
187+
188+this is used to connect to the local Xen hypervisor, this is the default
189+
190+=item qemu:///system
191+
192+connect locally as root to the daemon supervising QEmu and KVM domains
193+
194+=item qemu:///session
195+
196+connect locally as a normal user to his own set of QEmu and KVM domains
197+
198+=item lxc:///
199+
200+connect to a local linux container
201+
202+=back
203+
204+For remote access see the documentation page on how to make URIs.
205+The I<--readonly> option allows for read-only connection
206+
207+=item B<uri>
208+
209+Prints the hypervisor canonical URI, can be useful in shell mode.
210+
211+=item B<hostname>
212+
213+Print the hypervisor hostname.
214+
215+=item B<nodeinfo>
216+
217+Returns basic information about the node, like number and type of CPU,
218+and size of the physical memory.
219+
220+=item B<capabilities>
221+
222+Print an XML document describing the capabilities of the hypervisor
223+we are currently connected to. This includes a section on the host
224+capabilities in terms of CPU and features, and a set of description
225+for each kind of guest which can be virtualized. For a more complete
226+description see:
227+ L<http://libvirt.org/formatcaps.html>
228+The XML also show the NUMA topology information if available.
229+
230+=item B<list> optional I<--inactive> I<--all>
231+
232+Prints information about one or more domains. If no domains are
233+specified it prints out information about running domains.
234+
235+An example format for the list is as follows:
236+
237+B<virsh> list
238+ Id Name State
239+
240+----------------------------------
241+
242+ 0 Domain-0 running
243+ 2 fedora paused
244+
245+
246+Name is the name of the domain. ID the domain numeric id.
247+State is the run state (see below).
248+
249+B<STATES>
250+
251+The State field lists 7 states for a domain, and which ones the
252+current domain is in.
253+
254+=over 4
255+
256+=item B<running>
257+
258+The domain is currently running on a CPU
259+
260+=item B<idle>
261+
262+The domain is idle, and not running or runnable. This can be caused
263+because the domain is waiting on IO (a traditional wait state) or has
264+gone to sleep because there was nothing else for it to do.
265+
266+=item B<paused>
267+
268+The domain has been paused, usually occurring through the administrator
269+running B<virsh suspend>. When in a paused state the domain will still
270+consume allocated resources like memory, but will not be eligible for
271+scheduling by the hypervisor.
272+
273+=item B<shutdown>
274+
275+The domain is in the process of shutting down, i.e. the guest operating system
276+has been notified and should be in the process of stopping its operations
277+gracefully.
278+
279+=item B<shut off>
280+
281+The domain is not running. Usually this indicates the domain has been
282+shut down completely, or has not been started.
283+
284+=item B<crashed>
285+
286+The domain has crashed, which is always a violent ending. Usually
287+this state can only occur if the domain has been configured not to
288+restart on crash.
289+
290+=item B<dying>
291+
292+The domain is in process of dying, but hasn't completely shutdown or
293+crashed.
294+
295+=back
296+
297+=item B<freecell> optional I<cellno>
298+
299+Prints the available amount of memory on the machine or within a
300+NUMA cell if I<cellno> is provided.
301+
302+=item B<cpu-baseline> I<FILE>
303+
304+Compute baseline CPU which will be supported by all host CPUs given in <file>.
305+The list of host CPUs is built by extracting all <cpu> elements from the
306+<file>. Thus, the <file> can contain either a set of <cpu> elements separated
307+by new lines or even a set of complete <capabilities> elements printed by
308+B<capabilities> command.
309+
310+=item B<cpu-compare> I<FILE>
311+
312+Compare CPU definition from XML <file> with host CPU. The XML <file> may
313+contain either host or guest CPU definition. The host CPU definition is the
314+<cpu> element and its contents as printed by B<capabilities> command. The
315+guest CPU definition is the <cpu> element and its contents from domain XML
316+definition. For more information on guest CPU definition see:
317+L<http://libvirt.org/formatdomain.html#elementsCPU>
318+
319+=back
320+
321+=head1 DOMAIN COMMANDS
322+
323+The following commands manipulate domains directly, as stated
324+previously most commands take domain-id as the first parameter. The
325+I<domain-id> can be specified as a short integer, a name or a full UUID.
326+
327+=over 4
328+
329+=item B<autostart> optional I<--disable> I<domain-id>
330+
331+Configure a domain to be automatically started at boot.
332+
333+The option I<--disable> disables autostarting.
334+
335+=item B<console> I<domain-id>
336+
337+Connect the virtual serial console for the guest.
338+
339+=item B<create> I<FILE> optional I<--console> I<--paused>
340+
341+Create a domain from an XML <file>. An easy way to create the XML
342+<file> is to use the B<dumpxml> command to obtain the definition of a
343+pre-existing guest. The domain will be paused if the I<--paused> option
344+is used and supported by the driver; otherwise it will be running.
345+If I<--console> is requested, attach to the console after creation.
346+
347+B<Example>
348+
349+ virsh dumpxml <domain-id> > domain.xml
350+ edit domain.xml
351+ virsh create < domain.xml
352+
353+=item B<define> I<FILE>
354+
355+Define a domain from an XML <file>. The domain definition is registered
356+but not started.
357+
358+=item B<destroy> I<domain-id>
359+
360+Immediately terminate the domain domain-id. This doesn't give the domain
361+OS any chance to react, and it's the equivalent of ripping the power
362+cord out on a physical machine. In most cases you will want to use
363+the B<shutdown> command instead.
364+
365+=item B<domblkstat> I<domain> I<block-device>
366+
367+Get device block stats for a running domain.
368+
369+=item B<domifstat> I<domain> I<interface-device>
370+
371+Get network interface stats for a running domain.
372+
373+=item B<dommemstat> I<domain>
374+
375+Get memory stats for a running domain.
376+
377+=item B<domblkinfo> I<domain> I<block-device>
378+
379+Get block device size info for a domain.
380+
381+=item B<dominfo> I<domain-id>
382+
383+Returns basic information about the domain.
384+
385+=item B<domuuid> I<domain-name-or-id>
386+
387+Convert a domain name or id to domain UUID
388+
389+=item B<domid> I<domain-name-or-uuid>
390+
391+Convert a domain name (or UUID) to a domain id
392+
393+=item B<domjobabort> I<domain-id-or-uuid>
394+
395+Abort the currently running domain job.
396+
397+=item B<domjobinfo> I<domain-id-or-uuid>
398+
399+Returns information about jobs running on a domain.
400+
401+=item B<domname> I<domain-id-or-uuid>
402+
403+Convert a domain Id (or UUID) to domain name
404+
405+=item B<domstate> I<domain-id>
406+
407+Returns state about a running domain.
408+
409+=item B<domxml-from-native> I<format> I<config>
410+
411+Convert the file I<config> in the native guest configuration format
412+named by I<format> to a domain XML format.
413+
414+=item B<domxml-to-native> I<format> I<xml>
415+
416+Convert the file I<xml> in domain XML format to the native guest
417+configuration format named by I<format>.
418+
419+=item B<dump> I<domain-id> I<corefilepath>
420+
421+Dumps the core of a domain to a file for analysis.
422+
423+=item B<dumpxml> I<domain-id> optional I<--inactive> I<--security-info> I<--update-cpu>
424+
425+Output the domain information as an XML dump to stdout, this format can be used
426+by the B<create> command. Additional options affecting the XML dump may be
427+used. I<--inactive> tells virsh to dump domain configuration that will be used
428+on next start of the domain as opposed to the current domain configuration.
429+Using I<--security-info> security sensitive information will also be included
430+in the XML dump. I<--update-cpu> updates domain CPU requirements according to
431+host CPU.
432+
433+=item B<echo> optional I<--shell> I<--xml> I<arg>...
434+
435+Echo back each I<arg>, separated by space. If I<--shell> is
436+specified, then the output will be single-quoted where needed, so that
437+it is suitable for reuse in a shell context. If I<--xml> is
438+specified, then the output will be escaped for use in XML.
439+
440+=item B<edit> I<domain-id>
441+
442+Edit the XML configuration file for a domain.
443+
444+This is equivalent to:
445+
446+ virsh dumpxml domain > domain.xml
447+ edit domain.xml
448+ virsh define domain.xml
449+
450+except that it does some error checking.
451+
452+The editor used can be supplied by the C<$VISUAL> or C<$EDITOR> environment
453+variables, and defaults to C<vi>.
454+
455+=item B<managedsave> I<domain-id>
456+
457+Save and destroy a running domain, so it can be restarted from the same
458+state at a later time. When the virsh B<start> command is next run for
459+the domain, it will automatically be started from this saved state.
460+
461+=item B<managedsave-remove> I<domain-id>
462+
463+Remove the B<managedsave> state file for a domain, if it exists. This
464+ensures the domain will do a full boot the next time it is started.
465+
466+=item B<maxvcpus> optional I<type>
467+
468+Provide the maximum number of virtual CPUs supported for a guest VM on
469+this connection. If provided, the I<type> parameter must be a valid
470+type attribute for the <domain> element of XML.
471+
472+=item B<migrate> optional I<--live> I<--suspend> I<domain-id> I<desturi>
473+I<migrateuri>
474+
475+Migrate domain to another host. Add --live for live migration; --suspend
476+leaves the domain paused on the destination host. The I<desturi> is the
477+connection URI of the destination host, and I<migrateuri> is the
478+migration URI, which usually can be omitted.
479+
480+=item B<migrate-setmaxdowntime> I<domain-id> I<downtime>
481+
482+Set maximum tolerable downtime for a domain which is being live-migrated to
483+another host. The I<downtime> is a number of milliseconds the guest is allowed
484+to be down at the end of live migration.
485+
486+=item B<reboot> I<domain-id>
487+
488+Reboot a domain. This acts just as if the domain had the B<reboot>
489+command run from the console. The command returns as soon as it has
490+executed the reboot action, which may be significantly before the
491+domain actually reboots.
492+
493+The exact behavior of a domain when it reboots is set by the
494+I<on_reboot> parameter in the domain's XML definition.
495+
496+=item B<restore> I<state-file>
497+
498+Restores a domain from an B<virsh save> state file. See I<save> for more info.
499+
500+=item B<save> I<domain-id> I<state-file>
501+
502+Saves a running domain to a state file so that it can be restored
503+later. Once saved, the domain will no longer be running on the
504+system, thus the memory allocated for the domain will be free for
505+other domains to use. B<virsh restore> restores from this state file.
506+
507+This is roughly equivalent to doing a hibernate on a running computer,
508+with all the same limitations. Open network connections may be
509+severed upon restore, as TCP timeouts may have expired.
510+
511+=item B<schedinfo> optional I<--set> B<parameter=value> I<domain-id>
512+
513+=item B<schedinfo> optional I<--weight> B<number> optional I<--cap> B<number> I<domain-id>
514+
515+Allows you to show (and set) the domain scheduler parameters. The parameters available for each hypervisor are:
516+
517+LXC, QEMU/KVM (posix scheduler): cpu_shares
518+
519+Xen (credit scheduler): weight, cap
520+
521+ESX (allocation scheduler): reservation, limit, shares
522+
523+B<Note>: The cpu_shares parameter has a valid value range of 0-262144.
524+
525+B<Note>: The weight and cap parameters are defined only for the
526+XEN_CREDIT scheduler and are now I<DEPRECATED>.
527+
528+=item B<setmem> I<domain-id> B<kilobytes>
529+
530+Change the current memory allocation in the guest domain. This should take
531+effect immediately. The memory limit is specified in
532+kilobytes.
533+
534+For Xen, you can only adjust the memory of a running domain if the
535+domain is paravirtualized or running the PV balloon driver.
536+
537+=item B<setmaxmem> I<domain-id> B<kilobytes>
538+
539+Change the maximum memory allocation limit in the guest domain. This should
540+not change the current memory use. The memory limit is specified in
541+kilobytes.
542+
543+=item B<memtune> I<domain-id> optional I<--hard-limit> B<kilobytes>
544+optional I<--soft-limit> B<kilobytes> optional I<--swap-hard-limit>
545+B<kilobytes> -I<--min-guarantee> B<kilobytes>
546+
547+Allows you to display or set the domain memory parameters. Without
548+flags, the current settings are displayed; with a flag, the
549+appropriate limit is adjusted if supported by the hypervisor. LXC and
550+QEMU/KVM supports I<--hard-limit>, I<--soft-limit>, and I<--swap-hard-limit>.
551+
552+=item B<setvcpus> I<domain-id> I<count> optional I<--maximum> I<--config>
553+I<--live>
554+
555+Change the number of virtual CPUs active in the guest domain. Note that
556+I<count> may be limited by host, hypervisor or limit coming from the
557+original description of domain.
558+
559+For Xen, you can only adjust the virtual CPUs of a running domain if
560+the domain is paravirtualized.
561+
562+If I<--config> is specified, the change will only affect the next
563+boot of a domain. If I<--live> is specified, the domain must be
564+running, and the change takes place immediately. Both flags may be
565+specified, if supported by the hypervisor. If neither flag is given,
566+then I<--live> is implied and it is up to the hypervisor whether
567+I<--config> is also implied.
568+
569+If I<--maximum> is specified, then you must use I<--config> and
570+avoid I<--live>; this flag controls the maximum limit of vcpus that
571+can be hot-plugged the next time the domain is booted.
572+
573+=item B<shutdown> I<domain-id>
574+
575+Gracefully shuts down a domain. This coordinates with the domain OS
576+to perform graceful shutdown, so there is no guarantee that it will
577+succeed, and may take a variable length of time depending on what
578+services must be shutdown in the domain.
579+
580+The exact behavior of a domain when it shuts down is set by the
581+I<on_shutdown> parameter in the domain's XML definition.
582+
583+=item B<start> I<domain-name> optional I<--console> I<--paused>
584+
585+Start a (previously defined) inactive domain, either from the last
586+B<managedsave> state, or via a fresh boot if no managedsave state is
587+present. The domain will be paused if the I<--paused> option is
588+used and supported by the driver; otherwise it will be running.
589+If I<--console> is requested, attach to the console after creation.
590+
591+=item B<suspend> I<domain-id>
592+
593+Suspend a running domain. It is kept in memory but won't be scheduled
594+anymore.
595+
596+=item B<resume> I<domain-id>
597+
598+Moves a domain out of the suspended state. This will allow a previously
599+suspended domain to now be eligible for scheduling by the underlying
600+hypervisor.
601+
602+=item B<ttyconsole> I<domain-id>
603+
604+Output the device used for the TTY console of the domain. If the information
605+is not available the process will provide an exit code of 1.
606+
607+=item B<undefine> I<domain-id>
608+
609+Undefine the configuration for an inactive domain. Since it's not running
610+the domain name or UUID must be used as the I<domain-id>.
611+
612+=item B<vcpucount> I<domain-id> optional I<--maximum> I<--current>
613+I<--config> I<--live>
614+
615+Print information about the virtual cpu counts of the given
616+I<domain-id>. If no flags are specified, all possible counts are
617+listed in a table; otherwise, the output is limited to just the
618+numeric value requested.
619+
620+I<--maximum> requests information on the maximum cap of vcpus that a
621+domain can add via B<setvcpus>, while I<--current> shows the current
622+usage; these two flags cannot both be specified. I<--config>
623+requests information regarding the next time the domain will be
624+booted, while I<--live> requires a running domain and lists current
625+values; these two flags cannot both be specified.
626+
627+=item B<vcpuinfo> I<domain-id>
628+
629+Returns basic information about the domain virtual CPUs, like the number of
630+vCPUs, the running time, the affinity to physical processors.
631+
632+=item B<vcpupin> I<domain-id> I<vcpu> I<cpulist>
633+
634+Pin domain VCPUs to host physical CPUs. The I<vcpu> number must be provided
635+and I<cpulist> is a comma separated list of physical CPU numbers.
636+
637+=item B<vncdisplay> I<domain-id>
638+
639+Output the IP address and port number for the VNC display. If the information
640+is not available the process will provide an exit code of 1.
641+
642+=back
643+
644+=head1 DEVICE COMMANDS
645+
646+The following commands manipulate devices associated to domains.
647+The domain-id can be specified as a short integer, a name or a full UUID.
648+To better understand the values allowed as options for the command
649+reading the documentation at L<http://libvirt.org/formatdomain.html> on the
650+format of the device sections to get the most accurate set of accepted values.
651+
652+=over 4
653+
654+=item B<attach-device> I<domain-id> I<FILE>
655+
656+Attach a device to the domain, using a device definition in an XML file.
657+See the documentation to learn about libvirt XML format for a device.
658+For cdrom and floppy devices, this command only replaces the media within
659+the single existing device; consider using B<update-device> for this usage.
660+
661+=item B<attach-disk> I<domain-id> I<source> I<target> optional I<--driver driver> I<--subdriver subdriver> I<--type type> I<--mode mode>
662+
663+Attach a new disk device to the domain.
664+I<source> and I<target> are paths for the files and devices.
665+I<driver> can be I<file>, I<tap> or I<phy> depending on the kind of access.
666+I<type> can indicate I<cdrom> or I<floppy> as alternative to the disk default,
667+although this use only replaces the media within the existing virtual cdrom or
668+floppy device; consider using B<update-device> for this usage instead.
669+I<mode> can specify the two specific modes I<readonly> or I<shareable>.
670+
671+=item B<attach-interface> I<domain-id> I<type> I<source> optional I<--target target> I<--mac mac> I<--script script> I<--model model> I<--persistent>
672+
673+Attach a new network interface to the domain.
674+I<type> can be either I<network> to indicate a physical network device or I<bridge> to indicate a bridge to a device.
675+I<source> indicates the source device.
676+I<target> allows to indicate the target device in the guest.
677+I<mac> allows to specify the MAC address of the network interface.
678+I<script> allows to specify a path to a script handling a bridge instead of
679+the default one.
680+I<model> allows to specify the model type.
681+I<persistent> indicates the changes will affect the next boot of the domain.
682+
683+=item B<detach-device> I<domain-id> I<FILE>
684+
685+Detach a device from the domain, takes the same kind of XML descriptions
686+as command B<attach-device>.
687+
688+=item B<detach-disk> I<domain-id> I<target>
689+
690+Detach a disk device from a domain. The I<target> is the device as seen
691+from the domain.
692+
693+=item B<detach-interface> I<domain-id> I<type> optional I<--mac mac>
694+
695+Detach a network interface from a domain.
696+I<type> can be either I<network> to indicate a physical network device or I<bridge> to indicate a bridge to a device.
697+It is recommended to use the I<mac> option to distinguish between the interfaces
698+if more than one are present on the domain.
699+
700+=item B<update-device> I<domain-id> I<file> optional I<--persistent>
701+
702+Update the characteristics of a device associated with I<domain-id>,
703+based on the device definition in an XML I<file>. If the I<--persistent>
704+option is used, the changes will affect the next boot of the domain.
705+See the documentation to learn about libvirt XML format for a device.
706+
707+=back
708+
709+=head1 VIRTUAL NETWORK COMMANDS
710+
711+The following commands manipulate networks. Libvirt has the capability to
712+define virtual networks which can then be used by domains and linked to
713+actual network devices. For more detailed information about this feature
714+see the documentation at L<http://libvirt.org/formatnetwork.html> . A lot
715+of the command for virtual networks are similar to the one used for domains,
716+but the way to name a virtual network is either by its name or UUID.
717+
718+=over 4
719+
720+=item B<net-autostart> I<network> optional I<--disable>
721+
722+Configure a virtual network to be automatically started at boot.
723+The I<--disable> option disable autostarting.
724+
725+=item B<net-create> I<file>
726+
727+Create a virtual network from an XML I<file>, see the documentation to get
728+a description of the XML network format used by libvirt.
729+
730+=item B<net-define> I<file>
731+
732+Define a virtual network from an XML I<file>, the network is just defined but
733+not instantiated.
734+
735+=item B<net-destroy> I<network>
736+
737+Destroy a given virtual network specified by its name or UUID. This takes
738+effect immediately.
739+
740+=item B<net-dumpxml> I<network>
741+
742+Output the virtual network information as an XML dump to stdout.
743+
744+=item B<net-edit> I<network>
745+
746+Edit the XML configuration file for a network.
747+
748+This is equivalent to:
749+
750+ virsh net-dumpxml network > network.xml
751+ edit network.xml
752+ virsh net-define network.xml
753+
754+except that it does some error checking.
755+
756+The editor used can be supplied by the C<$VISUAL> or C<$EDITOR> environment
757+variables, and defaults to C<vi>.
758+
759+=item B<net-list> optional I<--inactive> or I<--all>
760+
761+Returns the list of active networks; if I<--all> is specified this will also
762+include defined but inactive networks, and if I<--inactive> is specified only
763+the inactive ones will be listed.
764+
765+=item B<net-name> I<network-UUID>
766+
767+Convert a network UUID to network name.
768+
769+=item B<net-start> I<network>
770+
771+Start a (previously defined) inactive network.
772+
773+=item B<net-undefine> I<network>
774+
775+Undefine the configuration for an inactive network.
776+
777+=item B<net-uuid> I<network-name>
778+
779+Convert a network name to network UUID.
780+
781+=back
782+
783+=head1 STORAGE POOL COMMANDS
784+
785+The following commands manipulate storage pools. Libvirt has the
786+capability to manage various storage solutions, including files, raw
787+partitions, and domain-specific formats, used to provide the storage
788+volumes visible as devices within virtual machines. For more detailed
789+information about this feature, see the documentation at
790+L<http://libvirt.org/formatstorage.html> . A lot of the commands for
791+pools are similar to the ones used for domains.
792+
793+=over 4
794+
795+=item B<find-storage-pool-sources> I<type> optional I<srcSpec>
796+
797+Returns XML describing all storage pools of a given I<type> that could
798+be found. If I<srcSpec> is provided, it is a file that contains XML
799+to further restrict the query for pools.
800+
801+=item B<find-storage-pool-sources> I<type> optional I<host> I<port>
802+
803+Returns XML describing all storage pools of a given I<type> that could
804+be found. If I<host> and I<port> are provided, they control where the
805+query is performed.
806+
807+=item B<pool-autostart> I<pool-or-uuid> optional I<--disable>
808+
809+Configure whether I<pool> should automatically start at boot.
810+
811+=item B<pool-build> I<pool-or-uuid>
812+
813+Build a given pool.
814+
815+=item B<pool-create> I<file>
816+
817+Create and start a pool object from the XML I<file>.
818+
819+=item B<pool-create-as> I<name> I<--print-xml> I<type> optional I<source-host>
820+I<source-path> I<source-dev> I<source-name> <target> I<--source-format format>
821+
822+Create and start a pool object I<name> from the raw parameters. If
823+I<--print-xml> is specified, then print the XML of the pool object
824+without creating the pool. Otherwise, the pool has the specified
825+I<type>.
826+
827+=item B<pool-define> I<file>
828+
829+Create, but do not start, a pool object from the XML I<file>.
830+
831+=item B<pool-define-as> I<name> I<--print-xml> I<type> optional I<source-host>
832+I<source-path> I<source-dev> I<source-name> <target> I<--source-format format>
833+
834+Create, but do not start, a pool object I<name> from the raw parameters. If
835+I<--print-xml> is specified, then print the XML of the pool object
836+without defining the pool. Otherwise, the pool has the specified
837+I<type>.
838+
839+=item B<pool-destroy> I<pool-or-uuid>
840+
841+Destroy a given I<pool> object. Libvirt will no longer manage the
842+storage described by the pool object, but the raw data contained in
843+the pool is not changed, and can be later recovered with
844+B<pool-create>.
845+
846+=item B<pool-delete> I<pool-or-uuid>
847+
848+Destroy the resources used by a given I<pool> object. This operation
849+is non-recoverable. The I<pool> object will still exist after this
850+command.
851+
852+=item B<pool-dumpxml> I<pool-or-uuid>
853+
854+Returns the XML information about the I<pool> object.
855+
856+=item B<pool-edit> I<pool-or-uuid>
857+
858+Edit the XML configuration file for a storage pool.
859+
860+This is equivalent to:
861+
862+ virsh pool-dumpxml pool > pool.xml
863+ edit pool.xml
864+ virsh pool-define pool.xml
865+
866+except that it does some error checking.
867+
868+The editor used can be supplied by the C<$VISUAL> or C<$EDITOR> environment
869+variables, and defaults to C<vi>.
870+
871+=item B<pool-info> I<pool-or-uuid>
872+
873+Returns basic information about the I<pool> object.
874+
875+=item B<pool-list> optional I<--inactive> I<--all> I<--details>
876+
877+List pool objects known to libvirt. By default, only pools in use by
878+active domains are listed; I<--inactive> lists just the inactive
879+pools, and I<--all> lists all pools. The I<--details> option instructs
880+virsh to additionally display pool persistence and capacity related
881+information where available.
882+
883+=item B<pool-name> I<uuid>
884+
885+Convert the I<uuid> to a pool name.
886+
887+=item B<pool-refresh> I<pool-or-uuid>
888+
889+Refresh the list of volumes contained in I<pool>.
890+
891+=item B<pool-start> I<pool-or-uuid>
892+
893+Start the storage I<pool>, which is previously defined but inactive.
894+
895+=item B<pool-undefine> I<pool-or-uuid>
896+
897+Undefine the configuration for an inactive I<pool>.
898+
899+=item B<pool-uuid> I<pool>
900+
901+Returns the UUID of the named I<pool>.
902+
903+=back
904+
905+=head1 VOLUME COMMANDS
906+
907+=over 4
908+
909+=item B<vol-create> I<pool-or-uuid> I<FILE>
910+
911+Create a volume from an XML <file>.
912+I<pool-or-uuid> is the name or UUID of the storage pool to create the volume in.
913+I<FILE> is the XML <file> with the volume definition. An easy way to create the
914+XML <file> is to use the B<vol-dumpxml> command to obtain the definition of a
915+pre-existing volume.
916+
917+B<Example>
918+
919+ virsh vol-dumpxml --pool storagepool1 appvolume1 > newvolume.xml
920+ edit newvolume.xml
921+ virsh vol-create differentstoragepool newvolume.xml
922+
923+=item B<vol-create-from> I<pool-or-uuid> I<FILE> [optional I<--inputpool>
924+I<pool-or-uuid>] I<vol-name-or-key-or-path>
925+
926+Create a volume, using another volume as input.
927+I<pool-or-uuid> is the name or UUID of the storage pool to create the volume in.
928+I<FILE> is the XML <file> with the volume definition.
929+I<--inputpool> I<pool-or-uuid> is the name or uuid of the storage pool the
930+source volume is in.
931+I<vol-name-or-key-or-path> is the name or key or path of the source volume.
932+
933+=item B<vol-create-as> I<pool-or-uuid> I<name> I<capacity> optional
934+I<--allocation> I<size> I<--format> I<string> I<--backing-vol>
935+I<vol-name-or-key-or-path> I<--backing-vol-format> I<string>
936+
937+Create a volume from a set of arguments.
938+I<pool-or-uuid> is the name or UUID of the storage pool to create the volume
939+in.
940+I<name> is the name of the new volume.
941+I<capacity> is the size of the volume to be created, with optional k, M, G, or
942+T suffix.
943+I<--allocation> I<size> is the initial size to be allocated in the volume, with
944+optional k, M, G, or T suffix.
945+I<--format> I<string> is used in file based storage pools to specify the volume
946+file format to use; raw, bochs, qcow, qcow2, vmdk.
947+I<--backing-vol> I<vol-name-or-key-or-path> is the source backing
948+volume to be used if taking a snapshot of an existing volume.
949+I<--backing-vol-format> I<string> is the format of the snapshot backing volume;
950+raw, bochs, qcow, qcow2, vmdk, host_device.
951+
952+=item B<vol-clone> [optional I<--pool> I<pool-or-uuid>] I<vol-name-or-key-or-path> I<name>
953+
954+Clone an existing volume. Less powerful, but easier to type, version of
955+B<vol-create-from>.
956+I<--pool> I<pool-or-uuid> is the name or UUID of the storage pool to create the volume in.
957+I<vol-name-or-key-or-path> is the name or key or path of the source volume.
958+I<name> is the name of the new volume.
959+
960+=item B<vol-delete> [optional I<--pool> I<pool-or-uuid>] I<vol-name-or-key-or-path>
961+
962+Delete a given volume.
963+I<--pool> I<pool-or-uuid> is the name or UUID of the storage pool the volume is in.
964+I<vol-name-or-key-or-path> is the name or key or path of the volume to delete.
965+
966+=item B<vol-wipe> [optional I<--pool> I<pool-or-uuid>] I<vol-name-or-key-or-path>
967+
968+Wipe a volume, ensure data previously on the volume is not accessible to future reads.
969+I<--pool> I<pool-or-uuid> is the name or UUID of the storage pool the volume is in.
970+I<vol-name-or-key-or-path> is the name or key or path of the volume to wipe.
971+
972+=item B<vol-dumpxml> [optional I<--pool> I<pool-or-uuid>] I<vol-name-or-key-or-path>
973+
974+Output the volume information as an XML dump to stdout.
975+I<--pool> I<pool-or-uuid> is the name or UUID of the storage pool the volume is in.
976+I<vol-name-or-key-or-path> is the name or key or path of the volume to output the XML of.
977+
978+=item B<vol-info> [optional I<--pool> I<pool-or-uuid>] I<vol-name-or-key-or-path>
979+
980+Returns basic information about the given storage volume.
981+I<--pool> I<pool-or-uuid> is the name or UUID of the storage pool the volume is in.
982+I<vol-name-or-key-or-path> is the name or key or path of the volume to return information for.
983+
984+=item B<vol-list> [optional I<--pool>] I<pool-or-uuid> optional I<--details>
985+
986+Return the list of volumes in the given storage pool.
987+I<--pool> I<pool-or-uuid> is the name or UUID of the storage pool.
988+The I<--details> option instructs virsh to additionally display volume
989+type and capacity related information where available.
990+
991+=item B<vol-pool> [optional I<--uuid>] I<vol-key-or-path>
992+
993+Return the pool name or UUID for a given volume. By default, the pool name is
994+returned. If the I<--uuid> option is given, the pool UUID is returned instead.
995+I<vol-key-or-path> is the key or path of the volume to return the pool
996+information for.
997+
998+=item B<vol-path> [optional I<--pool> I<pool-or-uuid>] I<vol-name-or-key>
999+
1000+Return the path for a given volume.
1001+I<--pool> I<pool-or-uuid> is the name or UUID of the storage pool the volume is in.
1002+I<vol-name-or-key> is the name or key of the volume to return the path for.
1003+
1004+=item B<vol-name> I<vol-key-or-path>
1005+
1006+Return the name for a given volume.
1007+I<vol-key-or-path> is the key or path of the volume to return the name for.
1008+
1009+=item B<vol-key> [optional I<--pool> I<pool-or-uuid>] I<vol-name-or-path>
1010+
1011+Return the volume key for a given volume.
1012+I<--pool> I<pool-or-uuid> is the name or UUID of the storage pool the volume is in.
1013+I<vol-name-or-path> is the name or path of the volume to return the volume key for.
1014+
1015+=back
1016+
1017+=head1 SECRET COMMANDS
1018+
1019+The following commands manipulate "secrets" (e.g. passwords, passphrases and
1020+encryption keys). Libvirt can store secrets independently from their use, and
1021+other objects (e.g. volumes or domains) can refer to the secrets for encryption
1022+or possibly other uses. Secrets are identified using an UUID. See
1023+L<http://libvirt.org/formatsecret.html> for documentation of the XML format
1024+used to represent properties of secrets.
1025+
1026+=over 4
1027+
1028+=item B<secret-define> I<file>
1029+
1030+Create a secret with the properties specified in I<file>, with no associated
1031+secret value. If I<file> does not specify a UUID, choose one automatically.
1032+If I<file> specifies an UUID of an existing secret, replace its properties by
1033+properties defined in I<file>, without affecting the secret value.
1034+
1035+=item B<secret-dumpxml> I<secret>
1036+
1037+Output properties of I<secret> (specified by its UUID) as an XML dump to stdout.
1038+
1039+=item B<secret-set-value> I<secret> I<base64>
1040+
1041+Set the value associated with I<secret> (specified by its UUID) to the
1042+Base64-encoded value I<base64>.
1043+
1044+=item B<secret-get-value> I<secret>
1045+
1046+Output the value associated with I<secret> (specified by its UUID) to stdout,
1047+encoded using Base64.
1048+
1049+=item B<secret-undefine> I<secret>
1050+
1051+Delete a I<secret> (specified by its UUID), including the associated value, if
1052+any.
1053+
1054+=item B<secret-list>
1055+
1056+Output a list of UUIDs of known secrets to stdout.
1057+
1058+=back
1059+
1060+=head1 SNAPSHOT COMMANDS
1061+
1062+The following commands manipulate domain snapshots. Snapshots take the
1063+disk, memory, and device state of a domain at a point-of-time, and save it
1064+for future use. They have many uses, from saving a "clean" copy of an OS
1065+image to saving a domain's state before a potentially destructive operation.
1066+Snapshots are identified with a unique name. See
1067+L<http://libvirt.org/formatsnapshot.html> for documentation of the XML format
1068+used to represent properties of snapshots.
1069+
1070+=over 4
1071+
1072+=item B<snapshot-create> I<domain> I<xmlfile>
1073+
1074+Create a snapshot for domain I<domain> with the properties specified in
1075+I<xmlfile>. The only properties settable for a domain snapshot are the
1076+<name> and <description>; the rest of the fields are ignored, and
1077+automatically filled in by libvirt. If I<xmlfile> is completely omitted,
1078+then libvirt will choose a value for all fields.
1079+
1080+=item B<snapshot-current> I<domain>
1081+
1082+Output the snapshot XML for the domain's current snapshot (if any).
1083+
1084+=item B<snapshot-list> I<domain>
1085+
1086+List all of the available snapshots for the given domain.
1087+
1088+=item B<snapshot-dumpxml> I<domain> I<snapshot>
1089+
1090+Output the snapshot XML for the domain's snapshot named I<snapshot>.
1091+
1092+=item B<snapshot-revert> I<domain> I<snapshot>
1093+
1094+Revert the given domain to the snapshot specified by I<snapshot>. Be aware
1095+that this is a destructive action; any changes in the domain since the
1096+snapshot was taken will be lost. Also note that the state of the domain after
1097+snapshot-revert is complete will be the state of the domain at the time
1098+the original snapshot was taken.
1099+
1100+=item B<snapshot-delete> I<domain> I<snapshot> I<--children>
1101+
1102+Delete the domain's snapshot named I<snapshot>. If this snapshot
1103+has child snapshots, changes from this snapshot will be merged into the
1104+children. If I<--children> is passed, then delete this snapshot and any
1105+children of this snapshot.
1106+
1107+=back
1108+
1109+=head1 NWFILTER COMMANDS
1110+
1111+The following commands manipulate network filters. Network filters allow
1112+filtering of the network traffic coming from and going to virtual machines.
1113+Individual network traffic filters are written in XML and may contain
1114+references to other network filters, describe traffic filtering rules,
1115+or contain both. Network filters are referenced by virtual machines
1116+from within their interface description. A network filter may be referenced
1117+by multiple virtual machines' interfaces.
1118+
1119+=over 4
1120+
1121+=item B<nwfilter-define> I<xmlfile>
1122+
1123+Make a new network filter known to libvirt. If a network filter with
1124+the same name already exists, it will be replaced with the new XML.
1125+Any running virtual machine referencing this network filter will have
1126+its network traffic rules adapted. If for any reason the network traffic
1127+filtering rules cannot be instantiated by any of the running virtual
1128+machines, then the new XML will be rejected.
1129+
1130+=item B<nwfilter-undefine> I<nwfilter-name>
1131+
1132+Delete a network filter. The deletion will fail if any running virtual
1133+machine is currently using this network filter.
1134+
1135+=item B<nwfilter-list>
1136+
1137+List all of the available network filters.
1138+
1139+=item B<nwfilter-dumpxml> I<nwfilter-name>
1140+
1141+Output the network filter XML.
1142+
1143+=item B<nwfilter-edit> I<nwfilter-name>
1144+
1145+Edit the XML of a network filter.
1146+
1147+This is equivalent to:
1148+
1149+ virsh nwfilter-dumpxml myfilter > myfilter.xml
1150+ edit myfilter.xml
1151+ virsh nwfilter-define myfilter.xml
1152+
1153+except that it does some error checking.
1154+The new network filter may be rejected due to the same reason as
1155+mentioned in I<nwfilter-define>.
1156+
1157+The editor used can be supplied by the C<$VISUAL> or C<$EDITOR> environment
1158+variables, and defaults to C<vi>.
1159+
1160+=back
1161+
1162+=head1 ENVIRONMENT
1163+
1164+The following environment variables can be set to alter the behaviour
1165+of C<virsh>
1166+
1167+=over 4
1168+
1169+=item VIRSH_DEFAULT_CONNECT_URI
1170+
1171+The hypervisor to connect to by default. Set this to a URI, in the same
1172+format as accepted by the B<connect> option.
1173+
1174+=item VISUAL
1175+
1176+The editor to use by the B<edit> and related options.
1177+
1178+=item EDITOR
1179+
1180+The editor to use by the B<edit> and related options, if C<VISUAL>
1181+is not set.
1182+
1183+=item LIBVIRT_DEBUG=LEVEL
1184+
1185+Turn on verbose debugging of all libvirt API calls. Valid levels are
1186+
1187+=over 4
1188+
1189+=item * LIBVIRT_DEBUG=1
1190+
1191+Messages at level DEBUG or above
1192+
1193+=item * LIBVIRT_DEBUG=2
1194+
1195+Messages at level INFO or above
1196+
1197+=item * LIBVIRT_DEBUG=3
1198+
1199+Messages at level WARNING or above
1200+
1201+=item * LIBVIRT_DEBUG=4
1202+
1203+Messages at level ERROR or above
1204+
1205+=back
1206+
1207+For further information about debugging options consult C<http://libvirt.org/logging.html>
1208+
1209+=back
1210+
1211+=head1 BUGS
1212+
1213+Report any bugs discovered to the libvirt community via the mailing
1214+list C<http://libvirt.org/contact.html> or bug tracker C<http://libvirt.org/bugs.html>.
1215+Alternatively report bugs to your software distributor / vendor.
1216+
1217+=head1 AUTHORS
1218+
1219+ Please refer to the AUTHORS file distributed with libvirt.
1220+
1221+ Based on the xm man page by:
1222+ Sean Dague <sean at dague dot net>
1223+ Daniel Stekloff <dsteklof at us dot ibm dot com>
1224+
1225+=head1 COPYRIGHT
1226+
1227+Copyright (C) 2005, 2007-2010 Red Hat, Inc., and the authors listed in the
1228+libvirt AUTHORS file.
1229+
1230+=head1 LICENSE
1231+
1232+virsh is distributed under the terms of the GNU LGPL v2+.
1233+This is free software; see the source for copying conditions. There
1234+is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR
1235+PURPOSE.
1236+
1237+=head1 SEE ALSO
1238+
1239+L<virt-install(1)>, L<virt-xml-validate(1)>, L<virt-top(1)>, L<virt-mem(1)>, L<virt-df(1)>, L<http://www.libvirt.org/>
1240+
1241+=cut
1242
1243=== added directory '.pc/0003-allow-libvirt-group-to-access-the-socket.patch'
1244=== renamed directory '.pc/0003-allow-libvirt-group-to-access-the-socket.patch' => '.pc/0003-allow-libvirt-group-to-access-the-socket.patch.moved'
1245=== renamed file '.pc/0003-allow-libvirt-group-to-access-the-socket.patch/daemon/libvirtd.conf' => '.pc/0003-allow-libvirt-group-to-access-the-socket.patch.moved/daemon/libvirtd.conf.THIS'
1246=== added file '.pc/0003-allow-libvirt-group-to-access-the-socket.patch/.timestamp'
1247=== added directory '.pc/0003-allow-libvirt-group-to-access-the-socket.patch/daemon'
1248=== added file '.pc/0003-allow-libvirt-group-to-access-the-socket.patch/daemon/libvirtd.conf'
1249--- .pc/0003-allow-libvirt-group-to-access-the-socket.patch/daemon/libvirtd.conf 1970-01-01 00:00:00 +0000
1250+++ .pc/0003-allow-libvirt-group-to-access-the-socket.patch/daemon/libvirtd.conf 2010-11-11 18:22:47 +0000
1251@@ -0,0 +1,346 @@
1252+# Master libvirt daemon configuration file
1253+#
1254+# For further information consult http://libvirt.org/format.html
1255+#
1256+# NOTE: the tests/daemon-conf regression test script requires
1257+# that each "PARAMETER = VALUE" line in this file have the parameter
1258+# name just after a leading "#".
1259+
1260+#################################################################
1261+#
1262+# Network connectivity controls
1263+#
1264+
1265+# Flag listening for secure TLS connections on the public TCP/IP port.
1266+# NB, must pass the --listen flag to the libvirtd process for this to
1267+# have any effect.
1268+#
1269+# It is necessary to setup a CA and issue server certificates before
1270+# using this capability.
1271+#
1272+# This is enabled by default, uncomment this to disable it
1273+#listen_tls = 0
1274+
1275+# Listen for unencrypted TCP connections on the public TCP/IP port.
1276+# NB, must pass the --listen flag to the libvirtd process for this to
1277+# have any effect.
1278+#
1279+# Using the TCP socket requires SASL authentication by default. Only
1280+# SASL mechanisms which support data encryption are allowed. This is
1281+# DIGEST_MD5 and GSSAPI (Kerberos5)
1282+#
1283+# This is disabled by default, uncomment this to enable it.
1284+#listen_tcp = 1
1285+
1286+
1287+
1288+# Override the port for accepting secure TLS connections
1289+# This can be a port number, or service name
1290+#
1291+#tls_port = "16514"
1292+
1293+# Override the port for accepting insecure TCP connections
1294+# This can be a port number, or service name
1295+#
1296+#tcp_port = "16509"
1297+
1298+
1299+# Override the default configuration which binds to all network
1300+# interfaces. This can be a numeric IPv4/6 address, or hostname
1301+#
1302+#listen_addr = "192.168.0.1"
1303+
1304+
1305+# Flag toggling mDNS advertisement of the libvirt service.
1306+#
1307+# Alternatively can disable for all services on a host by
1308+# stopping the Avahi daemon
1309+#
1310+# This is enabled by default, uncomment this to disable it
1311+#mdns_adv = 0
1312+
1313+# Override the default mDNS advertisement name. This must be
1314+# unique on the immediate broadcast network.
1315+#
1316+# The default is "Virtualization Host HOSTNAME", where HOSTNAME
1317+# is substituted for the short hostname of the machine (without domain)
1318+#
1319+#mdns_name = "Virtualization Host Joe Demo"
1320+
1321+
1322+#################################################################
1323+#
1324+# UNIX socket access controls
1325+#
1326+
1327+# Set the UNIX domain socket group ownership. This can be used to
1328+# allow a 'trusted' set of users access to management capabilities
1329+# without becoming root.
1330+#
1331+# This is restricted to 'root' by default.
1332+#unix_sock_group = "libvirt"
1333+
1334+# Set the UNIX socket permissions for the R/O socket. This is used
1335+# for monitoring VM status only
1336+#
1337+# Default allows any user. If setting group ownership may want to
1338+# restrict this to:
1339+#unix_sock_ro_perms = "0777"
1340+
1341+# Set the UNIX socket permissions for the R/W socket. This is used
1342+# for full management of VMs
1343+#
1344+# Default allows only root. If PolicyKit is enabled on the socket,
1345+# the default will change to allow everyone (eg, 0777)
1346+#
1347+# If not using PolicyKit and setting group ownership for access
1348+# control then you may want to relax this to:
1349+#unix_sock_rw_perms = "0770"
1350+
1351+# Set the name of the directory in which sockets will be found/created.
1352+#unix_sock_dir = "/var/run/libvirt"
1353+
1354+#################################################################
1355+#
1356+# Authentication.
1357+#
1358+# - none: do not perform auth checks. If you can connect to the
1359+# socket you are allowed. This is suitable if there are
1360+# restrictions on connecting to the socket (eg, UNIX
1361+# socket permissions), or if there is a lower layer in
1362+# the network providing auth (eg, TLS/x509 certificates)
1363+#
1364+# - sasl: use SASL infrastructure. The actual auth scheme is then
1365+# controlled from /etc/sasl2/libvirt.conf. For the TCP
1366+# socket only GSSAPI & DIGEST-MD5 mechanisms will be used.
1367+# For non-TCP or TLS sockets, any scheme is allowed.
1368+#
1369+# - polkit: use PolicyKit to authenticate. This is only suitable
1370+# for use on the UNIX sockets. The default policy will
1371+# require a user to supply their own password to gain
1372+# full read/write access (aka sudo like), while anyone
1373+# is allowed read/only access.
1374+#
1375+# Set an authentication scheme for UNIX read-only sockets
1376+# By default socket permissions allow anyone to connect
1377+#
1378+# To restrict monitoring of domains you may wish to enable
1379+# an authentication mechanism here
1380+#auth_unix_ro = "none"
1381+
1382+# Set an authentication scheme for UNIX read-write sockets
1383+# By default socket permissions only allow root. If PolicyKit
1384+# support was compiled into libvirt, the default will be to
1385+# use 'polkit' auth.
1386+#
1387+# If the unix_sock_rw_perms are changed you may wish to enable
1388+# an authentication mechanism here
1389+#auth_unix_rw = "none"
1390+
1391+# Change the authentication scheme for TCP sockets.
1392+#
1393+# If you don't enable SASL, then all TCP traffic is cleartext.
1394+# Don't do this outside of a dev/test scenario. For real world
1395+# use, always enable SASL and use the GSSAPI or DIGEST-MD5
1396+# mechanism in /etc/sasl2/libvirt.conf
1397+#auth_tcp = "sasl"
1398+
1399+# Change the authentication scheme for TLS sockets.
1400+#
1401+# TLS sockets already have encryption provided by the TLS
1402+# layer, and limited authentication is done by certificates
1403+#
1404+# It is possible to make use of any SASL authentication
1405+# mechanism as well, by using 'sasl' for this option
1406+#auth_tls = "none"
1407+
1408+
1409+
1410+#################################################################
1411+#
1412+# TLS x509 certificate configuration
1413+#
1414+
1415+
1416+# Override the default server key file path
1417+#
1418+#key_file = "/etc/pki/libvirt/private/serverkey.pem"
1419+
1420+# Override the default server certificate file path
1421+#
1422+#cert_file = "/etc/pki/libvirt/servercert.pem"
1423+
1424+# Override the default CA certificate path
1425+#
1426+#ca_file = "/etc/pki/CA/cacert.pem"
1427+
1428+# Specify a certificate revocation list.
1429+#
1430+# Defaults to not using a CRL, uncomment to enable it
1431+#crl_file = "/etc/pki/CA/crl.pem"
1432+
1433+
1434+
1435+#################################################################
1436+#
1437+# Authorization controls
1438+#
1439+
1440+
1441+# Flag to disable verification of client certificates
1442+#
1443+# Client certificate verification is the primary authentication mechanism.
1444+# Any client which does not present a certificate signed by the CA
1445+# will be rejected.
1446+#
1447+# Default is to always verify. Uncommenting this will disable
1448+# verification - make sure an IP whitelist is set
1449+#tls_no_verify_certificate = 1
1450+
1451+
1452+# A whitelist of allowed x509 Distinguished Names
1453+# This list may contain wildcards such as
1454+#
1455+# "C=GB,ST=London,L=London,O=Red Hat,CN=*"
1456+#
1457+# See the POSIX fnmatch function for the format of the wildcards.
1458+#
1459+# NB If this is an empty list, no client can connect, so comment out
1460+# entirely rather than using empty list to disable these checks
1461+#
1462+# By default, no DN's are checked
1463+#tls_allowed_dn_list = ["DN1", "DN2"]
1464+
1465+
1466+# A whitelist of allowed SASL usernames. The format for usernames
1467+# depends on the SASL authentication mechanism. Kerberos usernames
1468+# look like username@REALM
1469+#
1470+# This list may contain wildcards such as
1471+#
1472+# "*@EXAMPLE.COM"
1473+#
1474+# See the POSIX fnmatch function for the format of the wildcards.
1475+#
1476+# NB If this is an empty list, no client can connect, so comment out
1477+# entirely rather than using empty list to disable these checks
1478+#
1479+# By default, no Username's are checked
1480+#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]
1481+
1482+
1483+
1484+#################################################################
1485+#
1486+# Processing controls
1487+#
1488+
1489+# The maximum number of concurrent client connections to allow
1490+# over all sockets combined.
1491+#max_clients = 20
1492+
1493+
1494+# The minimum limit sets the number of workers to start up
1495+# initially. If the number of active clients exceeds this,
1496+# then more threads are spawned, up to the max_workers limit.
1497+# Typically you'd want max_workers to equal maximum number
1498+# of clients allowed
1499+#min_workers = 5
1500+#max_workers = 20
1501+
1502+# Total global limit on concurrent RPC calls. Should be
1503+# at least as large as max_workers. Beyond this, RPC requests
1504+# will be read into memory and queued. This directly impacts
1505+# memory usage; currently each request requires 256 KB of
1506+# memory. So by default up to 5 MB of memory is used
1507+#
1508+# XXX this isn't actually enforced yet, only the per-client
1509+# limit is used so far
1510+#max_requests = 20
1511+
1512+# Limit on concurrent requests from a single client
1513+# connection. To avoid one client monopolizing the server
1514+# this should be a small fraction of the global max_requests
1515+# and max_workers parameter
1516+#max_client_requests = 5
1517+
1518+#################################################################
1519+#
1520+# Logging controls
1521+#
1522+
1523+# Logging level: 4 errors, 3 warnings, 2 informations, 1 debug
1524+# basically 1 will log everything possible
1525+#log_level = 3
1526+
1527+# Logging filters:
1528+# A filter allows selecting a different logging level for a given category
1529+# of logs
1530+# The format for a filter is:
1531+# x:name
1532+# where name is a match string e.g. remote or qemu
1533+# the x prefix is the minimal level where matching messages should be logged
1534+# 1: DEBUG
1535+# 2: INFO
1536+# 3: WARNING
1537+# 4: ERROR
1538+#
1539+# Multiple filters can be defined in a single @filters; they just need to be
1540+# separated by spaces.
1541+#
1542+# e.g:
1543+# log_filters="3:remote 4:event"
1544+# to only get warning or errors from the remote layer and only errors from
1545+# the event layer.
1546+
1547+# Logging outputs:
1548+# An output is one of the places to save logging information
1549+# The format for an output can be:
1550+# x:stderr
1551+# output goes to stderr
1552+# x:syslog:name
1553+# use syslog for the output and use the given name as the ident
1554+# x:file:file_path
1555+# output to a file, with the given filepath
1556+# In all cases the x prefix is the minimal level, acting as a filter
1557+# 1: DEBUG
1558+# 2: INFO
1559+# 3: WARNING
1560+# 4: ERROR
1561+#
1562+# Multiple outputs can be defined; they just need to be separated by spaces.
1563+# e.g.:
1564+# log_outputs="3:syslog:libvirtd"
1565+# to log all warnings and errors to syslog under the libvirtd ident
1566+
1567+
1568+##################################################################
1569+#
1570+# Auditing
1571+#
1572+# This setting allows usage of the auditing subsystem to be altered:
1573+#
1574+# audit_level == 0 -> disable all auditing
1575+# audit_level == 1 -> enable auditing, only if enabled on host (default)
1576+# audit_level == 2 -> enable auditing, and exit if disabled on host
1577+#
1578+#audit_level = 2
1579+#
1580+# If set to 1, then audit messages will also be sent
1581+# via libvirt logging infrastructure. Defaults to 0
1582+#
1583+#audit_logging = 1
1584+
1585+###################################################################
1586+# UUID of the host:
1587+# Provide the UUID of the host here in case the command
1588+# 'dmidecode -s system-uuid' does not provide a valid uuid. In case
1589+# 'dmidecode' does not provide a valid UUID and none is provided here, a
1590+# temporary UUID will be generated.
1591+# Keep the format of the example UUID below. UUID must not have all digits
1592+# be the same.
1593+
1594+# NB This default all-zeros UUID will not work. Replace
1595+# it with the output of the 'uuidgen' command and then
1596+# uncomment this entry
1597+#host_uuid = "00000000-0000-0000-0000-000000000000"
1598
1599=== added directory '.pc/0004-fix-Debian-specific-path-to-hvm-loader.patch'
1600=== renamed directory '.pc/0004-fix-Debian-specific-path-to-hvm-loader.patch' => '.pc/0004-fix-Debian-specific-path-to-hvm-loader.patch.moved'
1601=== renamed file '.pc/0004-fix-Debian-specific-path-to-hvm-loader.patch/src/xen/xen_hypervisor.c' => '.pc/0004-fix-Debian-specific-path-to-hvm-loader.patch.moved/src/xen/xen_hypervisor.c.THIS'
1602=== added file '.pc/0004-fix-Debian-specific-path-to-hvm-loader.patch/.timestamp'
1603=== added directory '.pc/0004-fix-Debian-specific-path-to-hvm-loader.patch/src'
1604=== added directory '.pc/0004-fix-Debian-specific-path-to-hvm-loader.patch/src/xen'
1605=== added file '.pc/0004-fix-Debian-specific-path-to-hvm-loader.patch/src/xen/xen_hypervisor.c'
1606--- .pc/0004-fix-Debian-specific-path-to-hvm-loader.patch/src/xen/xen_hypervisor.c 1970-01-01 00:00:00 +0000
1607+++ .pc/0004-fix-Debian-specific-path-to-hvm-loader.patch/src/xen/xen_hypervisor.c 2010-11-11 18:22:47 +0000
1608@@ -0,0 +1,3588 @@
1609+/*
1610+ * xen_internal.c: direct access to Xen hypervisor level
1611+ *
1612+ * Copyright (C) 2005-2010 Red Hat, Inc.
1613+ *
1614+ * See COPYING.LIB for the License of this software
1615+ *
1616+ * Daniel Veillard <veillard@redhat.com>
1617+ */
1618+
1619+#include <config.h>
1620+
1621+#include <stdio.h>
1622+#include <string.h>
1623+/* required for uint8_t, uint32_t, etc ... */
1624+#include <stdint.h>
1625+#include <sys/types.h>
1626+#include <sys/stat.h>
1627+#include <unistd.h>
1628+#include <fcntl.h>
1629+#include <sys/mman.h>
1630+#include <sys/ioctl.h>
1631+#include <limits.h>
1632+#include <stdint.h>
1633+#include <regex.h>
1634+#include <errno.h>
1635+#include <sys/utsname.h>
1636+
1637+#ifdef __sun
1638+# include <sys/systeminfo.h>
1639+
1640+# include <priv.h>
1641+
1642+# ifndef PRIV_XVM_CONTROL
1643+# define PRIV_XVM_CONTROL ((const char *)"xvm_control")
1644+# endif
1645+
1646+#endif /* __sun */
1647+
1648+/* required for dom0_getdomaininfo_t */
1649+#include <xen/dom0_ops.h>
1650+#include <xen/version.h>
1651+#ifdef HAVE_XEN_LINUX_PRIVCMD_H
1652+# include <xen/linux/privcmd.h>
1653+#else
1654+# ifdef HAVE_XEN_SYS_PRIVCMD_H
1655+# include <xen/sys/privcmd.h>
1656+# endif
1657+#endif
1658+
1659+/* required for shutdown flags */
1660+#include <xen/sched.h>
1661+
1662+#include "virterror_internal.h"
1663+#include "logging.h"
1664+#include "datatypes.h"
1665+#include "driver.h"
1666+#include "util.h"
1667+#include "xen_driver.h"
1668+#include "xen_hypervisor.h"
1669+#include "xs_internal.h"
1670+#include "stats_linux.h"
1671+#include "block_stats.h"
1672+#include "xend_internal.h"
1673+#include "buf.h"
1674+#include "capabilities.h"
1675+#include "memory.h"
1676+
1677+#define VIR_FROM_THIS VIR_FROM_XEN
1678+
1679+/*
1680+ * so far there is 2 versions of the structures usable for doing
1681+ * hypervisor calls.
1682+ */
1683+/* the old one */
1684+typedef struct v0_hypercall_struct {
1685+ unsigned long op;
1686+ unsigned long arg[5];
1687+} v0_hypercall_t;
1688+
1689+#ifdef __linux__
1690+# define XEN_V0_IOCTL_HYPERCALL_CMD \
1691+ _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t))
1692+/* the new one */
1693+typedef struct v1_hypercall_struct
1694+{
1695+ uint64_t op;
1696+ uint64_t arg[5];
1697+} v1_hypercall_t;
1698+# define XEN_V1_IOCTL_HYPERCALL_CMD \
1699+ _IOC(_IOC_NONE, 'P', 0, sizeof(v1_hypercall_t))
1700+typedef v1_hypercall_t hypercall_t;
1701+#elif defined(__sun)
1702+typedef privcmd_hypercall_t hypercall_t;
1703+#else
1704+# error "unsupported platform"
1705+#endif
1706+
1707+#ifndef __HYPERVISOR_sysctl
1708+# define __HYPERVISOR_sysctl 35
1709+#endif
1710+#ifndef __HYPERVISOR_domctl
1711+# define __HYPERVISOR_domctl 36
1712+#endif
1713+
1714+#ifdef WITH_RHEL5_API
1715+# define SYS_IFACE_MIN_VERS_NUMA 3
1716+#else
1717+# define SYS_IFACE_MIN_VERS_NUMA 4
1718+#endif
1719+
1720+/* xen-unstable changeset 19788 removed MAX_VIRT_CPUS from public
1721+ * headers. Its semanitc was retained with XEN_LEGACY_MAX_VCPUS.
1722+ * Ensure MAX_VIRT_CPUS is defined accordingly.
1723+ */
1724+#if !defined(MAX_VIRT_CPUS) && defined(XEN_LEGACY_MAX_VCPUS)
1725+# define MAX_VIRT_CPUS XEN_LEGACY_MAX_VCPUS
1726+#endif
1727+
1728+static int xen_ioctl_hypercall_cmd = 0;
1729+static int initialized = 0;
1730+static int in_init = 0;
1731+static int hv_version = 0;
1732+static int hypervisor_version = 2;
1733+static int sys_interface_version = -1;
1734+static int dom_interface_version = -1;
1735+static int kb_per_pages = 0;
1736+
1737+/* Regular expressions used by xenHypervisorGetCapabilities, and
1738+ * compiled once by xenHypervisorInit. Note that these are POSIX.2
1739+ * extended regular expressions (regex(7)).
1740+ */
1741+static const char *flags_hvm_re = "^flags[[:blank:]]+:.* (vmx|svm)[[:space:]]";
1742+static regex_t flags_hvm_rec;
1743+static const char *flags_pae_re = "^flags[[:blank:]]+:.* pae[[:space:]]";
1744+static regex_t flags_pae_rec;
1745+static const char *xen_cap_re = "(xen|hvm)-[[:digit:]]+\\.[[:digit:]]+-(x86_32|x86_64|ia64|powerpc64)(p|be)?";
1746+static regex_t xen_cap_rec;
1747+
1748+/*
1749+ * The content of the structures for a getdomaininfolist system hypercall
1750+ */
1751+#ifndef DOMFLAGS_DYING
1752+# define DOMFLAGS_DYING (1<<0) /* Domain is scheduled to die. */
1753+# define DOMFLAGS_HVM (1<<1) /* Domain is HVM */
1754+# define DOMFLAGS_SHUTDOWN (1<<2) /* The guest OS has shut down. */
1755+# define DOMFLAGS_PAUSED (1<<3) /* Currently paused by control software. */
1756+# define DOMFLAGS_BLOCKED (1<<4) /* Currently blocked pending an event. */
1757+# define DOMFLAGS_RUNNING (1<<5) /* Domain is currently running. */
1758+# define DOMFLAGS_CPUMASK 255 /* CPU to which this domain is bound. */
1759+# define DOMFLAGS_CPUSHIFT 8
1760+# define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code. */
1761+# define DOMFLAGS_SHUTDOWNSHIFT 16
1762+#endif
1763+
1764+/*
1765+ * These flags explain why a system is in the state of "shutdown". Normally,
1766+ * They are defined in xen/sched.h
1767+ */
1768+#ifndef SHUTDOWN_poweroff
1769+# define SHUTDOWN_poweroff 0 /* Domain exited normally. Clean up and kill. */
1770+# define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
1771+# define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
1772+# define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
1773+#endif
1774+
1775+#define XEN_V0_OP_GETDOMAININFOLIST 38
1776+#define XEN_V1_OP_GETDOMAININFOLIST 38
1777+#define XEN_V2_OP_GETDOMAININFOLIST 6
1778+
1779+struct xen_v0_getdomaininfo {
1780+ domid_t domain; /* the domain number */
1781+ uint32_t flags; /* flags, see before */
1782+ uint64_t tot_pages; /* total number of pages used */
1783+ uint64_t max_pages; /* maximum number of pages allowed */
1784+ unsigned long shared_info_frame; /* MFN of shared_info struct */
1785+ uint64_t cpu_time; /* CPU time used */
1786+ uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
1787+ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
1788+ uint32_t ssidref;
1789+ xen_domain_handle_t handle;
1790+};
1791+typedef struct xen_v0_getdomaininfo xen_v0_getdomaininfo;
1792+
1793+struct xen_v2_getdomaininfo {
1794+ domid_t domain; /* the domain number */
1795+ uint32_t flags; /* flags, see before */
1796+ uint64_t tot_pages; /* total number of pages used */
1797+ uint64_t max_pages; /* maximum number of pages allowed */
1798+ uint64_t shared_info_frame; /* MFN of shared_info struct */
1799+ uint64_t cpu_time; /* CPU time used */
1800+ uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
1801+ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
1802+ uint32_t ssidref;
1803+ xen_domain_handle_t handle;
1804+};
1805+typedef struct xen_v2_getdomaininfo xen_v2_getdomaininfo;
1806+
1807+
1808+/* As of Hypervisor Call v2, DomCtl v5 we are now 8-byte aligned
1809+ even on 32-bit archs when dealing with uint64_t */
1810+#define ALIGN_64 __attribute__((aligned(8)))
1811+
1812+struct xen_v2d5_getdomaininfo {
1813+ domid_t domain; /* the domain number */
1814+ uint32_t flags; /* flags, see before */
1815+ uint64_t tot_pages ALIGN_64; /* total number of pages used */
1816+ uint64_t max_pages ALIGN_64; /* maximum number of pages allowed */
1817+ uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
1818+ uint64_t cpu_time ALIGN_64; /* CPU time used */
1819+ uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
1820+ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
1821+ uint32_t ssidref;
1822+ xen_domain_handle_t handle;
1823+};
1824+typedef struct xen_v2d5_getdomaininfo xen_v2d5_getdomaininfo;
1825+
1826+struct xen_v2d6_getdomaininfo {
1827+ domid_t domain; /* the domain number */
1828+ uint32_t flags; /* flags, see before */
1829+ uint64_t tot_pages ALIGN_64; /* total number of pages used */
1830+ uint64_t max_pages ALIGN_64; /* maximum number of pages allowed */
1831+ uint64_t shr_pages ALIGN_64; /* number of shared pages */
1832+ uint64_t shared_info_frame ALIGN_64; /* MFN of shared_info struct */
1833+ uint64_t cpu_time ALIGN_64; /* CPU time used */
1834+ uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
1835+ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
1836+ uint32_t ssidref;
1837+ xen_domain_handle_t handle;
1838+};
1839+typedef struct xen_v2d6_getdomaininfo xen_v2d6_getdomaininfo;
1840+
1841+union xen_getdomaininfo {
1842+ struct xen_v0_getdomaininfo v0;
1843+ struct xen_v2_getdomaininfo v2;
1844+ struct xen_v2d5_getdomaininfo v2d5;
1845+ struct xen_v2d6_getdomaininfo v2d6;
1846+};
1847+typedef union xen_getdomaininfo xen_getdomaininfo;
1848+
1849+union xen_getdomaininfolist {
1850+ struct xen_v0_getdomaininfo *v0;
1851+ struct xen_v2_getdomaininfo *v2;
1852+ struct xen_v2d5_getdomaininfo *v2d5;
1853+ struct xen_v2d6_getdomaininfo *v2d6;
1854+};
1855+typedef union xen_getdomaininfolist xen_getdomaininfolist;
1856+
1857+
1858+struct xen_v2_getschedulerid {
1859+ uint32_t sched_id; /* Get Scheduler ID from Xen */
1860+};
1861+typedef struct xen_v2_getschedulerid xen_v2_getschedulerid;
1862+
1863+
1864+union xen_getschedulerid {
1865+ struct xen_v2_getschedulerid *v2;
1866+};
1867+typedef union xen_getschedulerid xen_getschedulerid;
1868+
1869+struct xen_v2s4_availheap {
1870+ uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */
1871+ uint32_t max_bitwidth; /* Largest address width (zero if don't care). */
1872+ int32_t node; /* NUMA node (-1 for sum across all nodes). */
1873+ uint64_t avail_bytes; /* Bytes available in the specified region. */
1874+};
1875+
1876+typedef struct xen_v2s4_availheap xen_v2s4_availheap;
1877+
1878+struct xen_v2s5_availheap {
1879+ uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */
1880+ uint32_t max_bitwidth; /* Largest address width (zero if don't care). */
1881+ int32_t node; /* NUMA node (-1 for sum across all nodes). */
1882+ uint64_t avail_bytes ALIGN_64; /* Bytes available in the specified region. */
1883+};
1884+
1885+typedef struct xen_v2s5_availheap xen_v2s5_availheap;
1886+
1887+
1888+#define XEN_GETDOMAININFOLIST_ALLOC(domlist, size) \
1889+ (hypervisor_version < 2 ? \
1890+ (VIR_ALLOC_N(domlist.v0, (size)) == 0) : \
1891+ (dom_interface_version >= 6 ? \
1892+ (VIR_ALLOC_N(domlist.v2d6, (size)) == 0) : \
1893+ (dom_interface_version == 5 ? \
1894+ (VIR_ALLOC_N(domlist.v2d5, (size)) == 0) : \
1895+ (VIR_ALLOC_N(domlist.v2, (size)) == 0))))
1896+
1897+#define XEN_GETDOMAININFOLIST_FREE(domlist) \
1898+ (hypervisor_version < 2 ? \
1899+ VIR_FREE(domlist.v0) : \
1900+ (dom_interface_version >= 6 ? \
1901+ VIR_FREE(domlist.v2d6) : \
1902+ (dom_interface_version == 5 ? \
1903+ VIR_FREE(domlist.v2d5) : \
1904+ VIR_FREE(domlist.v2))))
1905+
1906+#define XEN_GETDOMAININFOLIST_CLEAR(domlist, size) \
1907+ (hypervisor_version < 2 ? \
1908+ memset(domlist.v0, 0, sizeof(*domlist.v0) * size) : \
1909+ (dom_interface_version >= 6 ? \
1910+ memset(domlist.v2d6, 0, sizeof(*domlist.v2d6) * size) : \
1911+ (dom_interface_version == 5 ? \
1912+ memset(domlist.v2d5, 0, sizeof(*domlist.v2d5) * size) : \
1913+ memset(domlist.v2, 0, sizeof(*domlist.v2) * size))))
1914+
1915+#define XEN_GETDOMAININFOLIST_DOMAIN(domlist, n) \
1916+ (hypervisor_version < 2 ? \
1917+ domlist.v0[n].domain : \
1918+ (dom_interface_version >= 6 ? \
1919+ domlist.v2d6[n].domain : \
1920+ (dom_interface_version == 5 ? \
1921+ domlist.v2d5[n].domain : \
1922+ domlist.v2[n].domain)))
1923+
1924+#define XEN_GETDOMAININFOLIST_UUID(domlist, n) \
1925+ (hypervisor_version < 2 ? \
1926+ domlist.v0[n].handle : \
1927+ (dom_interface_version >= 6 ? \
1928+ domlist.v2d6[n].handle : \
1929+ (dom_interface_version == 5 ? \
1930+ domlist.v2d5[n].handle : \
1931+ domlist.v2[n].handle)))
1932+
1933+#define XEN_GETDOMAININFOLIST_DATA(domlist) \
1934+ (hypervisor_version < 2 ? \
1935+ (void*)(domlist->v0) : \
1936+ (dom_interface_version >= 6 ? \
1937+ (void*)(domlist->v2d6) : \
1938+ (dom_interface_version == 5 ? \
1939+ (void*)(domlist->v2d5) : \
1940+ (void*)(domlist->v2))))
1941+
1942+#define XEN_GETDOMAININFO_SIZE \
1943+ (hypervisor_version < 2 ? \
1944+ sizeof(xen_v0_getdomaininfo) : \
1945+ (dom_interface_version >= 6 ? \
1946+ sizeof(xen_v2d6_getdomaininfo) : \
1947+ (dom_interface_version == 5 ? \
1948+ sizeof(xen_v2d5_getdomaininfo) : \
1949+ sizeof(xen_v2_getdomaininfo))))
1950+
1951+#define XEN_GETDOMAININFO_CLEAR(dominfo) \
1952+ (hypervisor_version < 2 ? \
1953+ memset(&(dominfo.v0), 0, sizeof(xen_v0_getdomaininfo)) : \
1954+ (dom_interface_version >= 6 ? \
1955+ memset(&(dominfo.v2d6), 0, sizeof(xen_v2d6_getdomaininfo)) : \
1956+ (dom_interface_version == 5 ? \
1957+ memset(&(dominfo.v2d5), 0, sizeof(xen_v2d5_getdomaininfo)) : \
1958+ memset(&(dominfo.v2), 0, sizeof(xen_v2_getdomaininfo)))))
1959+
1960+#define XEN_GETDOMAININFO_DOMAIN(dominfo) \
1961+ (hypervisor_version < 2 ? \
1962+ dominfo.v0.domain : \
1963+ (dom_interface_version >= 6 ? \
1964+ dominfo.v2d6.domain : \
1965+ (dom_interface_version == 5 ? \
1966+ dominfo.v2d5.domain : \
1967+ dominfo.v2.domain)))
1968+
1969+#define XEN_GETDOMAININFO_CPUTIME(dominfo) \
1970+ (hypervisor_version < 2 ? \
1971+ dominfo.v0.cpu_time : \
1972+ (dom_interface_version >= 6 ? \
1973+ dominfo.v2d6.cpu_time : \
1974+ (dom_interface_version == 5 ? \
1975+ dominfo.v2d5.cpu_time : \
1976+ dominfo.v2.cpu_time)))
1977+
1978+
1979+#define XEN_GETDOMAININFO_CPUCOUNT(dominfo) \
1980+ (hypervisor_version < 2 ? \
1981+ dominfo.v0.nr_online_vcpus : \
1982+ (dom_interface_version >= 6 ? \
1983+ dominfo.v2d6.nr_online_vcpus : \
1984+ (dom_interface_version == 5 ? \
1985+ dominfo.v2d5.nr_online_vcpus : \
1986+ dominfo.v2.nr_online_vcpus)))
1987+
1988+#define XEN_GETDOMAININFO_MAXCPUID(dominfo) \
1989+ (hypervisor_version < 2 ? \
1990+ dominfo.v0.max_vcpu_id : \
1991+ (dom_interface_version >= 6 ? \
1992+ dominfo.v2d6.max_vcpu_id : \
1993+ (dom_interface_version == 5 ? \
1994+ dominfo.v2d5.max_vcpu_id : \
1995+ dominfo.v2.max_vcpu_id)))
1996+
1997+#define XEN_GETDOMAININFO_FLAGS(dominfo) \
1998+ (hypervisor_version < 2 ? \
1999+ dominfo.v0.flags : \
2000+ (dom_interface_version >= 6 ? \
2001+ dominfo.v2d6.flags : \
2002+ (dom_interface_version == 5 ? \
2003+ dominfo.v2d5.flags : \
2004+ dominfo.v2.flags)))
2005+
2006+#define XEN_GETDOMAININFO_TOT_PAGES(dominfo) \
2007+ (hypervisor_version < 2 ? \
2008+ dominfo.v0.tot_pages : \
2009+ (dom_interface_version >= 6 ? \
2010+ dominfo.v2d6.tot_pages : \
2011+ (dom_interface_version == 5 ? \
2012+ dominfo.v2d5.tot_pages : \
2013+ dominfo.v2.tot_pages)))
2014+
2015+#define XEN_GETDOMAININFO_MAX_PAGES(dominfo) \
2016+ (hypervisor_version < 2 ? \
2017+ dominfo.v0.max_pages : \
2018+ (dom_interface_version >= 6 ? \
2019+ dominfo.v2d6.max_pages : \
2020+ (dom_interface_version == 5 ? \
2021+ dominfo.v2d5.max_pages : \
2022+ dominfo.v2.max_pages)))
2023+
2024+#define XEN_GETDOMAININFO_UUID(dominfo) \
2025+ (hypervisor_version < 2 ? \
2026+ dominfo.v0.handle : \
2027+ (dom_interface_version >= 6 ? \
2028+ dominfo.v2d6.handle : \
2029+ (dom_interface_version == 5 ? \
2030+ dominfo.v2d5.handle : \
2031+ dominfo.v2.handle)))
2032+
2033+
2034+static int
2035+lock_pages(void *addr, size_t len)
2036+{
2037+#ifdef __linux__
2038+ return (mlock(addr, len));
2039+#elif defined(__sun)
2040+ return (0);
2041+#endif
2042+}
2043+
2044+static int
2045+unlock_pages(void *addr, size_t len)
2046+{
2047+#ifdef __linux__
2048+ return (munlock(addr, len));
2049+#elif defined(__sun)
2050+ return (0);
2051+#endif
2052+}
2053+
2054+
2055+struct xen_v0_getdomaininfolistop {
2056+ domid_t first_domain;
2057+ uint32_t max_domains;
2058+ struct xen_v0_getdomaininfo *buffer;
2059+ uint32_t num_domains;
2060+};
2061+typedef struct xen_v0_getdomaininfolistop xen_v0_getdomaininfolistop;
2062+
2063+
2064+struct xen_v2_getdomaininfolistop {
2065+ domid_t first_domain;
2066+ uint32_t max_domains;
2067+ struct xen_v2_getdomaininfo *buffer;
2068+ uint32_t num_domains;
2069+};
2070+typedef struct xen_v2_getdomaininfolistop xen_v2_getdomaininfolistop;
2071+
2072+/* As of HV version 2, sysctl version 3 the *buffer pointer is 64-bit aligned */
2073+struct xen_v2s3_getdomaininfolistop {
2074+ domid_t first_domain;
2075+ uint32_t max_domains;
2076+#ifdef __BIG_ENDIAN__
2077+ struct {
2078+ int __pad[(sizeof (long long) - sizeof (struct xen_v2d5_getdomaininfo *)) / sizeof (int)];
2079+ struct xen_v2d5_getdomaininfo *v;
2080+ } buffer;
2081+#else
2082+ union {
2083+ struct xen_v2d5_getdomaininfo *v;
2084+ uint64_t pad ALIGN_64;
2085+ } buffer;
2086+#endif
2087+ uint32_t num_domains;
2088+};
2089+typedef struct xen_v2s3_getdomaininfolistop xen_v2s3_getdomaininfolistop;
2090+
2091+
2092+
2093+struct xen_v0_domainop {
2094+ domid_t domain;
2095+};
2096+typedef struct xen_v0_domainop xen_v0_domainop;
2097+
2098+/*
2099+ * The information for a destroydomain system hypercall
2100+ */
2101+#define XEN_V0_OP_DESTROYDOMAIN 9
2102+#define XEN_V1_OP_DESTROYDOMAIN 9
2103+#define XEN_V2_OP_DESTROYDOMAIN 2
2104+
2105+/*
2106+ * The information for a pausedomain system hypercall
2107+ */
2108+#define XEN_V0_OP_PAUSEDOMAIN 10
2109+#define XEN_V1_OP_PAUSEDOMAIN 10
2110+#define XEN_V2_OP_PAUSEDOMAIN 3
2111+
2112+/*
2113+ * The information for an unpausedomain system hypercall
2114+ */
2115+#define XEN_V0_OP_UNPAUSEDOMAIN 11
2116+#define XEN_V1_OP_UNPAUSEDOMAIN 11
2117+#define XEN_V2_OP_UNPAUSEDOMAIN 4
2118+
2119+/*
2120+ * The information for an setmaxmem system hypercall
2121+ */
2122+#define XEN_V0_OP_SETMAXMEM 28
2123+#define XEN_V1_OP_SETMAXMEM 28
2124+#define XEN_V2_OP_SETMAXMEM 11
2125+
2126+struct xen_v0_setmaxmem {
2127+ domid_t domain;
2128+ uint64_t maxmem;
2129+};
2130+typedef struct xen_v0_setmaxmem xen_v0_setmaxmem;
2131+typedef struct xen_v0_setmaxmem xen_v1_setmaxmem;
2132+
2133+struct xen_v2_setmaxmem {
2134+ uint64_t maxmem;
2135+};
2136+typedef struct xen_v2_setmaxmem xen_v2_setmaxmem;
2137+
2138+struct xen_v2d5_setmaxmem {
2139+ uint64_t maxmem ALIGN_64;
2140+};
2141+typedef struct xen_v2d5_setmaxmem xen_v2d5_setmaxmem;
2142+
2143+/*
2144+ * The information for an setmaxvcpu system hypercall
2145+ */
2146+#define XEN_V0_OP_SETMAXVCPU 41
2147+#define XEN_V1_OP_SETMAXVCPU 41
2148+#define XEN_V2_OP_SETMAXVCPU 15
2149+
2150+struct xen_v0_setmaxvcpu {
2151+ domid_t domain;
2152+ uint32_t maxvcpu;
2153+};
2154+typedef struct xen_v0_setmaxvcpu xen_v0_setmaxvcpu;
2155+typedef struct xen_v0_setmaxvcpu xen_v1_setmaxvcpu;
2156+
2157+struct xen_v2_setmaxvcpu {
2158+ uint32_t maxvcpu;
2159+};
2160+typedef struct xen_v2_setmaxvcpu xen_v2_setmaxvcpu;
2161+
2162+/*
2163+ * The information for an setvcpumap system hypercall
2164+ * Note that between 1 and 2 the limitation to 64 physical CPU was lifted
2165+ * hence the difference in structures
2166+ */
2167+#define XEN_V0_OP_SETVCPUMAP 20
2168+#define XEN_V1_OP_SETVCPUMAP 20
2169+#define XEN_V2_OP_SETVCPUMAP 9
2170+
2171+struct xen_v0_setvcpumap {
2172+ domid_t domain;
2173+ uint32_t vcpu;
2174+ cpumap_t cpumap;
2175+};
2176+typedef struct xen_v0_setvcpumap xen_v0_setvcpumap;
2177+typedef struct xen_v0_setvcpumap xen_v1_setvcpumap;
2178+
2179+struct xen_v2_cpumap {
2180+ uint8_t *bitmap;
2181+ uint32_t nr_cpus;
2182+};
2183+struct xen_v2_setvcpumap {
2184+ uint32_t vcpu;
2185+ struct xen_v2_cpumap cpumap;
2186+};
2187+typedef struct xen_v2_setvcpumap xen_v2_setvcpumap;
2188+
2189+/* HV version 2, Dom version 5 requires 64-bit alignment */
2190+struct xen_v2d5_cpumap {
2191+#ifdef __BIG_ENDIAN__
2192+ struct {
2193+ int __pad[(sizeof (long long) - sizeof (uint8_t *)) / sizeof (int)];
2194+ uint8_t *v;
2195+ } bitmap;
2196+#else
2197+ union {
2198+ uint8_t *v;
2199+ uint64_t pad ALIGN_64;
2200+ } bitmap;
2201+#endif
2202+ uint32_t nr_cpus;
2203+};
2204+struct xen_v2d5_setvcpumap {
2205+ uint32_t vcpu;
2206+ struct xen_v2d5_cpumap cpumap;
2207+};
2208+typedef struct xen_v2d5_setvcpumap xen_v2d5_setvcpumap;
2209+
2210+/*
2211+ * The information for an vcpuinfo system hypercall
2212+ */
2213+#define XEN_V0_OP_GETVCPUINFO 43
2214+#define XEN_V1_OP_GETVCPUINFO 43
2215+#define XEN_V2_OP_GETVCPUINFO 14
2216+
2217+struct xen_v0_vcpuinfo {
2218+ domid_t domain; /* owner's domain */
2219+ uint32_t vcpu; /* the vcpu number */
2220+ uint8_t online; /* seen as on line */
2221+ uint8_t blocked; /* blocked on event */
2222+ uint8_t running; /* scheduled on CPU */
2223+ uint64_t cpu_time; /* nanosecond of CPU used */
2224+ uint32_t cpu; /* current mapping */
2225+ cpumap_t cpumap; /* deprecated in V2 */
2226+};
2227+typedef struct xen_v0_vcpuinfo xen_v0_vcpuinfo;
2228+typedef struct xen_v0_vcpuinfo xen_v1_vcpuinfo;
2229+
2230+struct xen_v2_vcpuinfo {
2231+ uint32_t vcpu; /* the vcpu number */
2232+ uint8_t online; /* seen as on line */
2233+ uint8_t blocked; /* blocked on event */
2234+ uint8_t running; /* scheduled on CPU */
2235+ uint64_t cpu_time; /* nanosecond of CPU used */
2236+ uint32_t cpu; /* current mapping */
2237+};
2238+typedef struct xen_v2_vcpuinfo xen_v2_vcpuinfo;
2239+
2240+struct xen_v2d5_vcpuinfo {
2241+ uint32_t vcpu; /* the vcpu number */
2242+ uint8_t online; /* seen as on line */
2243+ uint8_t blocked; /* blocked on event */
2244+ uint8_t running; /* scheduled on CPU */
2245+ uint64_t cpu_time ALIGN_64; /* nanosecond of CPU used */
2246+ uint32_t cpu; /* current mapping */
2247+};
2248+typedef struct xen_v2d5_vcpuinfo xen_v2d5_vcpuinfo;
2249+
2250+/*
2251+ * from V2 the pinning of a vcpu is read with a separate call
2252+ */
2253+#define XEN_V2_OP_GETVCPUMAP 25
2254+typedef struct xen_v2_setvcpumap xen_v2_getvcpumap;
2255+typedef struct xen_v2d5_setvcpumap xen_v2d5_getvcpumap;
2256+
2257+/*
2258+ * from V2 we get the scheduler information
2259+ */
2260+#define XEN_V2_OP_GETSCHEDULERID 4
2261+
2262+/*
2263+ * from V2 we get the available heap information
2264+ */
2265+#define XEN_V2_OP_GETAVAILHEAP 9
2266+
2267+/*
2268+ * from V2 we get the scheduler parameter
2269+ */
2270+#define XEN_V2_OP_SCHEDULER 16
2271+/* Scheduler types. */
2272+#define XEN_SCHEDULER_SEDF 4
2273+#define XEN_SCHEDULER_CREDIT 5
2274+/* get/set scheduler parameters */
2275+#define XEN_DOMCTL_SCHEDOP_putinfo 0
2276+#define XEN_DOMCTL_SCHEDOP_getinfo 1
2277+
2278+struct xen_v2_setschedinfo {
2279+ uint32_t sched_id;
2280+ uint32_t cmd;
2281+ union {
2282+ struct xen_domctl_sched_sedf {
2283+ uint64_t period ALIGN_64;
2284+ uint64_t slice ALIGN_64;
2285+ uint64_t latency ALIGN_64;
2286+ uint32_t extratime;
2287+ uint32_t weight;
2288+ } sedf;
2289+ struct xen_domctl_sched_credit {
2290+ uint16_t weight;
2291+ uint16_t cap;
2292+ } credit;
2293+ } u;
2294+};
2295+typedef struct xen_v2_setschedinfo xen_v2_setschedinfo;
2296+typedef struct xen_v2_setschedinfo xen_v2_getschedinfo;
2297+
2298+
2299+/*
2300+ * The hypercall operation structures also have changed on
2301+ * changeset 86d26e6ec89b
2302+ */
2303+/* the old structure */
2304+struct xen_op_v0 {
2305+ uint32_t cmd;
2306+ uint32_t interface_version;
2307+ union {
2308+ xen_v0_getdomaininfolistop getdomaininfolist;
2309+ xen_v0_domainop domain;
2310+ xen_v0_setmaxmem setmaxmem;
2311+ xen_v0_setmaxvcpu setmaxvcpu;
2312+ xen_v0_setvcpumap setvcpumap;
2313+ xen_v0_vcpuinfo getvcpuinfo;
2314+ uint8_t padding[128];
2315+ } u;
2316+};
2317+typedef struct xen_op_v0 xen_op_v0;
2318+typedef struct xen_op_v0 xen_op_v1;
2319+
2320+/* the new structure for systems operations */
2321+struct xen_op_v2_sys {
2322+ uint32_t cmd;
2323+ uint32_t interface_version;
2324+ union {
2325+ xen_v2_getdomaininfolistop getdomaininfolist;
2326+ xen_v2s3_getdomaininfolistop getdomaininfolists3;
2327+ xen_v2_getschedulerid getschedulerid;
2328+ xen_v2s4_availheap availheap;
2329+ xen_v2s5_availheap availheap5;
2330+ uint8_t padding[128];
2331+ } u;
2332+};
2333+typedef struct xen_op_v2_sys xen_op_v2_sys;
2334+
2335+/* the new structure for domains operation */
2336+struct xen_op_v2_dom {
2337+ uint32_t cmd;
2338+ uint32_t interface_version;
2339+ domid_t domain;
2340+ union {
2341+ xen_v2_setmaxmem setmaxmem;
2342+ xen_v2d5_setmaxmem setmaxmemd5;
2343+ xen_v2_setmaxvcpu setmaxvcpu;
2344+ xen_v2_setvcpumap setvcpumap;
2345+ xen_v2d5_setvcpumap setvcpumapd5;
2346+ xen_v2_vcpuinfo getvcpuinfo;
2347+ xen_v2d5_vcpuinfo getvcpuinfod5;
2348+ xen_v2_getvcpumap getvcpumap;
2349+ xen_v2d5_getvcpumap getvcpumapd5;
2350+ xen_v2_setschedinfo setschedinfo;
2351+ xen_v2_getschedinfo getschedinfo;
2352+ uint8_t padding[128];
2353+ } u;
2354+};
2355+typedef struct xen_op_v2_dom xen_op_v2_dom;
2356+
2357+
2358+#ifdef __linux__
2359+# define XEN_HYPERVISOR_SOCKET "/proc/xen/privcmd"
2360+# define HYPERVISOR_CAPABILITIES "/sys/hypervisor/properties/capabilities"
2361+#elif defined(__sun)
2362+# define XEN_HYPERVISOR_SOCKET "/dev/xen/privcmd"
2363+#else
2364+# error "unsupported platform"
2365+#endif
2366+
2367+#ifndef PROXY
2368+static unsigned long xenHypervisorGetMaxMemory(virDomainPtr domain);
2369+#endif
2370+
2371+#ifndef PROXY
2372+struct xenUnifiedDriver xenHypervisorDriver = {
2373+ xenHypervisorOpen, /* open */
2374+ xenHypervisorClose, /* close */
2375+ xenHypervisorGetVersion, /* version */
2376+ NULL, /* hostname */
2377+ NULL, /* nodeGetInfo */
2378+ xenHypervisorGetCapabilities, /* getCapabilities */
2379+ xenHypervisorListDomains, /* listDomains */
2380+ xenHypervisorNumOfDomains, /* numOfDomains */
2381+ NULL, /* domainCreateXML */
2382+ xenHypervisorPauseDomain, /* domainSuspend */
2383+ xenHypervisorResumeDomain, /* domainResume */
2384+ NULL, /* domainShutdown */
2385+ NULL, /* domainReboot */
2386+ xenHypervisorDestroyDomain, /* domainDestroy */
2387+ xenHypervisorDomainGetOSType, /* domainGetOSType */
2388+ xenHypervisorGetMaxMemory, /* domainGetMaxMemory */
2389+ xenHypervisorSetMaxMemory, /* domainSetMaxMemory */
2390+ NULL, /* domainSetMemory */
2391+ xenHypervisorGetDomainInfo, /* domainGetInfo */
2392+ NULL, /* domainSave */
2393+ NULL, /* domainRestore */
2394+ NULL, /* domainCoreDump */
2395+ xenHypervisorPinVcpu, /* domainPinVcpu */
2396+ xenHypervisorGetVcpus, /* domainGetVcpus */
2397+ NULL, /* listDefinedDomains */
2398+ NULL, /* numOfDefinedDomains */
2399+ NULL, /* domainCreate */
2400+ NULL, /* domainDefineXML */
2401+ NULL, /* domainUndefine */
2402+ NULL, /* domainAttachDeviceFlags */
2403+ NULL, /* domainDetachDeviceFlags */
2404+ NULL, /* domainUpdateDeviceFlags */
2405+ NULL, /* domainGetAutostart */
2406+ NULL, /* domainSetAutostart */
2407+ xenHypervisorGetSchedulerType, /* domainGetSchedulerType */
2408+ xenHypervisorGetSchedulerParameters, /* domainGetSchedulerParameters */
2409+ xenHypervisorSetSchedulerParameters, /* domainSetSchedulerParameters */
2410+};
2411+#endif /* !PROXY */
2412+
2413+#define virXenError(code, ...) \
2414+ if (in_init == 0) \
2415+ virReportErrorHelper(NULL, VIR_FROM_XEN, code, __FILE__, \
2416+ __FUNCTION__, __LINE__, __VA_ARGS__)
2417+
2418+#ifndef PROXY
2419+
2420+/**
2421+ * virXenErrorFunc:
2422+ * @error: the error number
2423+ * @func: the function failing
2424+ * @info: extra information string
2425+ * @value: extra information number
2426+ *
2427+ * Handle an error at the xend daemon interface
2428+ */
2429+static void
2430+virXenErrorFunc(virErrorNumber error, const char *func, const char *info,
2431+ int value)
2432+{
2433+ char fullinfo[1000];
2434+ const char *errmsg;
2435+
2436+ if ((error == VIR_ERR_OK) || (in_init != 0))
2437+ return;
2438+
2439+
2440+ errmsg =virErrorMsg(error, info);
2441+ if (func != NULL) {
2442+ snprintf(fullinfo, 999, "%s: %s", func, info);
2443+ fullinfo[999] = 0;
2444+ virRaiseError(NULL, NULL, NULL, VIR_FROM_XEN, error, VIR_ERR_ERROR,
2445+ errmsg, fullinfo, NULL, value, 0, errmsg, fullinfo,
2446+ value);
2447+ } else {
2448+ virRaiseError(NULL, NULL, NULL, VIR_FROM_XEN, error, VIR_ERR_ERROR,
2449+ errmsg, info, NULL, value, 0, errmsg, info,
2450+ value);
2451+ }
2452+}
2453+
2454+#endif /* PROXY */
2455+
2456+/**
2457+ * xenHypervisorDoV0Op:
2458+ * @handle: the handle to the Xen hypervisor
2459+ * @op: pointer to the hypervisor operation structure
2460+ *
2461+ * Do an hypervisor operation though the old interface,
2462+ * this leads to an hypervisor call through ioctl.
2463+ *
2464+ * Returns 0 in case of success and -1 in case of error.
2465+ */
2466+static int
2467+xenHypervisorDoV0Op(int handle, xen_op_v0 * op)
2468+{
2469+ int ret;
2470+ v0_hypercall_t hc;
2471+
2472+ memset(&hc, 0, sizeof(hc));
2473+ op->interface_version = hv_version << 8;
2474+ hc.op = __HYPERVISOR_dom0_op;
2475+ hc.arg[0] = (unsigned long) op;
2476+
2477+ if (lock_pages(op, sizeof(dom0_op_t)) < 0) {
2478+ virXenError(VIR_ERR_XEN_CALL, " locking");
2479+ return (-1);
2480+ }
2481+
2482+ ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
2483+ if (ret < 0) {
2484+ virXenError(VIR_ERR_XEN_CALL, " ioctl %d",
2485+ xen_ioctl_hypercall_cmd);
2486+ }
2487+
2488+ if (unlock_pages(op, sizeof(dom0_op_t)) < 0) {
2489+ virXenError(VIR_ERR_XEN_CALL, " releasing");
2490+ ret = -1;
2491+ }
2492+
2493+ if (ret < 0)
2494+ return (-1);
2495+
2496+ return (0);
2497+}
2498+/**
2499+ * xenHypervisorDoV1Op:
2500+ * @handle: the handle to the Xen hypervisor
2501+ * @op: pointer to the hypervisor operation structure
2502+ *
2503+ * Do an hypervisor v1 operation, this leads to an hypervisor call through
2504+ * ioctl.
2505+ *
2506+ * Returns 0 in case of success and -1 in case of error.
2507+ */
2508+static int
2509+xenHypervisorDoV1Op(int handle, xen_op_v1* op)
2510+{
2511+ int ret;
2512+ hypercall_t hc;
2513+
2514+ memset(&hc, 0, sizeof(hc));
2515+ op->interface_version = DOM0_INTERFACE_VERSION;
2516+ hc.op = __HYPERVISOR_dom0_op;
2517+ hc.arg[0] = (unsigned long) op;
2518+
2519+ if (lock_pages(op, sizeof(dom0_op_t)) < 0) {
2520+ virXenError(VIR_ERR_XEN_CALL, " locking");
2521+ return (-1);
2522+ }
2523+
2524+ ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
2525+ if (ret < 0) {
2526+ virXenError(VIR_ERR_XEN_CALL, " ioctl %d",
2527+ xen_ioctl_hypercall_cmd);
2528+ }
2529+
2530+ if (unlock_pages(op, sizeof(dom0_op_t)) < 0) {
2531+ virXenError(VIR_ERR_XEN_CALL, " releasing");
2532+ ret = -1;
2533+ }
2534+
2535+ if (ret < 0)
2536+ return (-1);
2537+
2538+ return (0);
2539+}
2540+
2541+/**
2542+ * xenHypervisorDoV2Sys:
2543+ * @handle: the handle to the Xen hypervisor
2544+ * @op: pointer to the hypervisor operation structure
2545+ *
2546+ * Do an hypervisor v2 system operation, this leads to an hypervisor
2547+ * call through ioctl.
2548+ *
2549+ * Returns 0 in case of success and -1 in case of error.
2550+ */
2551+static int
2552+xenHypervisorDoV2Sys(int handle, xen_op_v2_sys* op)
2553+{
2554+ int ret;
2555+ hypercall_t hc;
2556+
2557+ memset(&hc, 0, sizeof(hc));
2558+ op->interface_version = sys_interface_version;
2559+ hc.op = __HYPERVISOR_sysctl;
2560+ hc.arg[0] = (unsigned long) op;
2561+
2562+ if (lock_pages(op, sizeof(dom0_op_t)) < 0) {
2563+ virXenError(VIR_ERR_XEN_CALL, " locking");
2564+ return (-1);
2565+ }
2566+
2567+ ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
2568+ if (ret < 0) {
2569+ virXenError(VIR_ERR_XEN_CALL, " sys ioctl %d",
2570+ xen_ioctl_hypercall_cmd);
2571+ }
2572+
2573+ if (unlock_pages(op, sizeof(dom0_op_t)) < 0) {
2574+ virXenError(VIR_ERR_XEN_CALL, " releasing");
2575+ ret = -1;
2576+ }
2577+
2578+ if (ret < 0)
2579+ return (-1);
2580+
2581+ return (0);
2582+}
2583+
2584+/**
2585+ * xenHypervisorDoV2Dom:
2586+ * @handle: the handle to the Xen hypervisor
2587+ * @op: pointer to the hypervisor domain operation structure
2588+ *
2589+ * Do an hypervisor v2 domain operation, this leads to an hypervisor
2590+ * call through ioctl.
2591+ *
2592+ * Returns 0 in case of success and -1 in case of error.
2593+ */
2594+static int
2595+xenHypervisorDoV2Dom(int handle, xen_op_v2_dom* op)
2596+{
2597+ int ret;
2598+ hypercall_t hc;
2599+
2600+ memset(&hc, 0, sizeof(hc));
2601+ op->interface_version = dom_interface_version;
2602+ hc.op = __HYPERVISOR_domctl;
2603+ hc.arg[0] = (unsigned long) op;
2604+
2605+ if (lock_pages(op, sizeof(dom0_op_t)) < 0) {
2606+ virXenError(VIR_ERR_XEN_CALL, " locking");
2607+ return (-1);
2608+ }
2609+
2610+ ret = ioctl(handle, xen_ioctl_hypercall_cmd, (unsigned long) &hc);
2611+ if (ret < 0) {
2612+ virXenError(VIR_ERR_XEN_CALL, " ioctl %d",
2613+ xen_ioctl_hypercall_cmd);
2614+ }
2615+
2616+ if (unlock_pages(op, sizeof(dom0_op_t)) < 0) {
2617+ virXenError(VIR_ERR_XEN_CALL, " releasing");
2618+ ret = -1;
2619+ }
2620+
2621+ if (ret < 0)
2622+ return (-1);
2623+
2624+ return (0);
2625+}
2626+
2627+/**
2628+ * virXen_getdomaininfolist:
2629+ * @handle: the hypervisor handle
2630+ * @first_domain: first domain in the range
2631+ * @maxids: maximum number of domains to list
2632+ * @dominfos: output structures
2633+ *
2634+ * Do a low level hypercall to list existing domains information
2635+ *
2636+ * Returns the number of domains or -1 in case of failure
2637+ */
2638+static int
2639+virXen_getdomaininfolist(int handle, int first_domain, int maxids,
2640+ xen_getdomaininfolist *dominfos)
2641+{
2642+ int ret = -1;
2643+
2644+ if (lock_pages(XEN_GETDOMAININFOLIST_DATA(dominfos),
2645+ XEN_GETDOMAININFO_SIZE * maxids) < 0) {
2646+ virXenError(VIR_ERR_XEN_CALL, " locking");
2647+ return (-1);
2648+ }
2649+ if (hypervisor_version > 1) {
2650+ xen_op_v2_sys op;
2651+
2652+ memset(&op, 0, sizeof(op));
2653+ op.cmd = XEN_V2_OP_GETDOMAININFOLIST;
2654+
2655+ if (sys_interface_version < 3) {
2656+ op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
2657+ op.u.getdomaininfolist.max_domains = maxids;
2658+ op.u.getdomaininfolist.buffer = dominfos->v2;
2659+ op.u.getdomaininfolist.num_domains = maxids;
2660+ } else {
2661+ op.u.getdomaininfolists3.first_domain = (domid_t) first_domain;
2662+ op.u.getdomaininfolists3.max_domains = maxids;
2663+ op.u.getdomaininfolists3.buffer.v = dominfos->v2d5;
2664+ op.u.getdomaininfolists3.num_domains = maxids;
2665+ }
2666+ ret = xenHypervisorDoV2Sys(handle, &op);
2667+
2668+ if (ret == 0) {
2669+ if (sys_interface_version < 3)
2670+ ret = op.u.getdomaininfolist.num_domains;
2671+ else
2672+ ret = op.u.getdomaininfolists3.num_domains;
2673+ }
2674+ } else if (hypervisor_version == 1) {
2675+ xen_op_v1 op;
2676+
2677+ memset(&op, 0, sizeof(op));
2678+ op.cmd = XEN_V1_OP_GETDOMAININFOLIST;
2679+ op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
2680+ op.u.getdomaininfolist.max_domains = maxids;
2681+ op.u.getdomaininfolist.buffer = dominfos->v0;
2682+ op.u.getdomaininfolist.num_domains = maxids;
2683+ ret = xenHypervisorDoV1Op(handle, &op);
2684+ if (ret == 0)
2685+ ret = op.u.getdomaininfolist.num_domains;
2686+ } else if (hypervisor_version == 0) {
2687+ xen_op_v0 op;
2688+
2689+ memset(&op, 0, sizeof(op));
2690+ op.cmd = XEN_V0_OP_GETDOMAININFOLIST;
2691+ op.u.getdomaininfolist.first_domain = (domid_t) first_domain;
2692+ op.u.getdomaininfolist.max_domains = maxids;
2693+ op.u.getdomaininfolist.buffer = dominfos->v0;
2694+ op.u.getdomaininfolist.num_domains = maxids;
2695+ ret = xenHypervisorDoV0Op(handle, &op);
2696+ if (ret == 0)
2697+ ret = op.u.getdomaininfolist.num_domains;
2698+ }
2699+ if (unlock_pages(XEN_GETDOMAININFOLIST_DATA(dominfos),
2700+ XEN_GETDOMAININFO_SIZE * maxids) < 0) {
2701+ virXenError(VIR_ERR_XEN_CALL, " release");
2702+ ret = -1;
2703+ }
2704+ return(ret);
2705+}
2706+
2707+static int
2708+virXen_getdomaininfo(int handle, int first_domain,
2709+ xen_getdomaininfo *dominfo) {
2710+ xen_getdomaininfolist dominfos;
2711+
2712+ if (hypervisor_version < 2) {
2713+ dominfos.v0 = &(dominfo->v0);
2714+ } else {
2715+ dominfos.v2 = &(dominfo->v2);
2716+ }
2717+
2718+ return virXen_getdomaininfolist(handle, first_domain, 1, &dominfos);
2719+}
2720+
2721+
2722+#ifndef PROXY
2723+/**
2724+ * xenHypervisorGetSchedulerType:
2725+ * @domain: pointer to the Xen Hypervisor block
2726+ * @nparams:give a number of scheduler parameters.
2727+ *
2728+ * Do a low level hypercall to get scheduler type
2729+ *
2730+ * Returns scheduler name or NULL in case of failure
2731+ */
2732+char *
2733+xenHypervisorGetSchedulerType(virDomainPtr domain, int *nparams)
2734+{
2735+ char *schedulertype = NULL;
2736+ xenUnifiedPrivatePtr priv;
2737+
2738+ if (domain->conn == NULL) {
2739+ virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
2740+ "domain or conn is NULL", 0);
2741+ return NULL;
2742+ }
2743+
2744+ priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
2745+ if (priv->handle < 0) {
2746+ virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
2747+ "priv->handle invalid", 0);
2748+ return NULL;
2749+ }
2750+ if (domain->id < 0) {
2751+ virXenError(VIR_ERR_OPERATION_INVALID,
2752+ "%s", _("domain is not running"));
2753+ return NULL;
2754+ }
2755+
2756+ /*
2757+ * Support only dom_interface_version >=5
2758+ * (Xen3.1.0 or later)
2759+ * TODO: check on Xen 3.0.3
2760+ */
2761+ if (dom_interface_version < 5) {
2762+ virXenErrorFunc(VIR_ERR_NO_XEN, __FUNCTION__,
2763+ "unsupported in dom interface < 5", 0);
2764+ return NULL;
2765+ }
2766+
2767+ if (hypervisor_version > 1) {
2768+ xen_op_v2_sys op;
2769+ int ret;
2770+
2771+ memset(&op, 0, sizeof(op));
2772+ op.cmd = XEN_V2_OP_GETSCHEDULERID;
2773+ ret = xenHypervisorDoV2Sys(priv->handle, &op);
2774+ if (ret < 0)
2775+ return(NULL);
2776+
2777+ switch (op.u.getschedulerid.sched_id){
2778+ case XEN_SCHEDULER_SEDF:
2779+ schedulertype = strdup("sedf");
2780+ if (schedulertype == NULL)
2781+ virReportOOMError();
2782+ if (nparams)
2783+ *nparams = 6;
2784+ break;
2785+ case XEN_SCHEDULER_CREDIT:
2786+ schedulertype = strdup("credit");
2787+ if (schedulertype == NULL)
2788+ virReportOOMError();
2789+ if (nparams)
2790+ *nparams = 2;
2791+ break;
2792+ default:
2793+ break;
2794+ }
2795+ }
2796+
2797+ return schedulertype;
2798+}
2799+
/* Parameter field names understood by the credit scheduler. */
static const char *str_weight = "weight";
static const char *str_cap = "cap";
2802+
/**
 * xenHypervisorGetSchedulerParameters:
 * @domain: pointer to the Xen Hypervisor block
 * @params: pointer to scheduler parameters.
 *     This memory area should be allocated before calling.
 * @nparams: on input, the number of entries available in @params
 *     (should match the count reported by xenHypervisorGetSchedulerType());
 *     on success it is set to the number of entries actually filled in.
 *
 * Do a low level hypercall to get scheduler parameters
 *
 * Returns 0 or -1 in case of failure
 */
int
xenHypervisorGetSchedulerParameters(virDomainPtr domain,
                                    virSchedParameterPtr params, int *nparams)
{
    xenUnifiedPrivatePtr priv;

    if (domain->conn == NULL) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        "domain or conn is NULL", 0);
        return -1;
    }

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        "priv->handle invalid", 0);
        return -1;
    }
    /* Scheduler parameters only make sense for a running domain. */
    if (domain->id < 0) {
        virXenError(VIR_ERR_OPERATION_INVALID,
                    "%s", _("domain is not running"));
        return -1;
    }

    /*
     * Support only dom_interface_version >=5
     * (Xen3.1.0 or later)
     * TODO: check on Xen 3.0.3
     */
    if (dom_interface_version < 5) {
        virXenErrorFunc(VIR_ERR_NO_XEN, __FUNCTION__,
                        "unsupported in dom interface < 5", 0);
        return -1;
    }

    if (hypervisor_version > 1) {
        xen_op_v2_sys op_sys;
        xen_op_v2_dom op_dom;
        int ret;

        /* First ask the hypervisor which scheduler is active. */
        memset(&op_sys, 0, sizeof(op_sys));
        op_sys.cmd = XEN_V2_OP_GETSCHEDULERID;
        ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
        if (ret < 0)
            return -1;

        switch (op_sys.u.getschedulerid.sched_id){
        case XEN_SCHEDULER_SEDF:
            /* TODO: Implement for Xen/SEDF */
            TODO
            return(-1);
        case XEN_SCHEDULER_CREDIT:
            /* The credit scheduler exposes exactly two parameters;
             * the caller's buffer must have room for both. */
            if (*nparams < 2)
                return(-1);
            memset(&op_dom, 0, sizeof(op_dom));
            op_dom.cmd = XEN_V2_OP_SCHEDULER;
            op_dom.domain = (domid_t) domain->id;
            op_dom.u.getschedinfo.sched_id = XEN_SCHEDULER_CREDIT;
            op_dom.u.getschedinfo.cmd = XEN_DOMCTL_SCHEDOP_getinfo;
            ret = xenHypervisorDoV2Dom(priv->handle, &op_dom);
            if (ret < 0)
                return(-1);

            /* params[0] = "weight" */
            if (virStrcpyStatic(params[0].field, str_weight) == NULL) {
                virXenError(VIR_ERR_INTERNAL_ERROR,
                            "Weight %s too big for destination", str_weight);
                return -1;
            }
            params[0].type = VIR_DOMAIN_SCHED_FIELD_UINT;
            params[0].value.ui = op_dom.u.getschedinfo.u.credit.weight;

            /* params[1] = "cap" */
            if (virStrcpyStatic(params[1].field, str_cap) == NULL) {
                virXenError(VIR_ERR_INTERNAL_ERROR,
                            "Cap %s too big for destination", str_cap);
                return -1;
            }
            params[1].type = VIR_DOMAIN_SCHED_FIELD_UINT;
            params[1].value.ui = op_dom.u.getschedinfo.u.credit.cap;

            *nparams = 2;
            break;
        default:
            virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
                        "Unknown scheduler", op_sys.u.getschedulerid.sched_id);
            return -1;
        }
    }

    return 0;
}
2906+
/**
 * xenHypervisorSetSchedulerParameters:
 * @domain: pointer to the Xen Hypervisor block
 * @params: array of scheduler parameters to apply
 * @nparams: number of entries in @params
 *
 * Do a low level hypercall to set scheduler parameters
 *
 * Returns 0 or -1 in case of failure
 */
int
xenHypervisorSetSchedulerParameters(virDomainPtr domain,
                                 virSchedParameterPtr params, int nparams)
{
    int i;
    unsigned int val;
    xenUnifiedPrivatePtr priv;
    char buf[256];

    if (domain->conn == NULL) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        "domain or conn is NULL", 0);
        return -1;
    }

    if ((nparams == 0) || (params == NULL)) {
        virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
                        "Noparameters given", 0);
        return(-1);
    }

    priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
    if (priv->handle < 0) {
        virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
                        "priv->handle invalid", 0);
        return -1;
    }
    /* Scheduler parameters only make sense for a running domain. */
    if (domain->id < 0) {
        virXenError(VIR_ERR_OPERATION_INVALID,
                    "%s", _("domain is not running"));
        return -1;
    }

    /*
     * Support only dom_interface_version >=5
     * (Xen3.1.0 or later)
     * TODO: check on Xen 3.0.3
     */
    if (dom_interface_version < 5) {
        virXenErrorFunc(VIR_ERR_NO_XEN, __FUNCTION__,
                        "unsupported in dom interface < 5", 0);
        return -1;
    }

    if (hypervisor_version > 1) {
        xen_op_v2_sys op_sys;
        xen_op_v2_dom op_dom;
        int ret;

        /* Determine the active scheduler before deciding how to
         * interpret the supplied parameters. */
        memset(&op_sys, 0, sizeof(op_sys));
        op_sys.cmd = XEN_V2_OP_GETSCHEDULERID;
        ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
        if (ret == -1) return -1;

        switch (op_sys.u.getschedulerid.sched_id){
        case XEN_SCHEDULER_SEDF:
            /* TODO: Implement for Xen/SEDF */
            TODO
            return(-1);
        case XEN_SCHEDULER_CREDIT: {
            memset(&op_dom, 0, sizeof(op_dom));
            op_dom.cmd = XEN_V2_OP_SCHEDULER;
            op_dom.domain = (domid_t) domain->id;
            op_dom.u.getschedinfo.sched_id = XEN_SCHEDULER_CREDIT;
            op_dom.u.getschedinfo.cmd = XEN_DOMCTL_SCHEDOP_putinfo;

            /*
             * credit scheduler parameters
             * following values do not change the parameters
             */
            op_dom.u.getschedinfo.u.credit.weight = 0;
            op_dom.u.getschedinfo.u.credit.cap = (uint16_t)~0U;

            /* Apply only the fields the caller supplied; anything
             * other than "weight"/"cap" of type UINT is rejected. */
            for (i = 0; i < nparams; i++) {
                memset(&buf, 0, sizeof(buf));
                if (STREQ (params[i].field, str_weight) &&
                    params[i].type == VIR_DOMAIN_SCHED_FIELD_UINT) {
                    val = params[i].value.ui;
                    /* Valid weight range is 1..65535. */
                    if ((val < 1) || (val > USHRT_MAX)) {
                        snprintf(buf, sizeof(buf), _("Credit scheduler weight parameter (%d) is out of range (1-65535)"), val);
                        virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__, buf, val);
                        return(-1);
                    }
                    op_dom.u.getschedinfo.u.credit.weight = val;
                } else if (STREQ (params[i].field, str_cap) &&
                    params[i].type == VIR_DOMAIN_SCHED_FIELD_UINT) {
                    val = params[i].value.ui;
                    /* Valid cap range is 0..65534 (65535 is the
                     * "leave unchanged" sentinel set above). */
                    if (val >= USHRT_MAX) {
                        snprintf(buf, sizeof(buf), _("Credit scheduler cap parameter (%d) is out of range (0-65534)"), val);
                        virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__, buf, val);
                        return(-1);
                    }
                    op_dom.u.getschedinfo.u.credit.cap = val;
                } else {
                    virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
                                    "Credit scheduler accepts 'cap' and 'weight' integer parameters",
                                    0);
                    return(-1);
                }
            }

            ret = xenHypervisorDoV2Dom(priv->handle, &op_dom);
            if (ret < 0)
                return -1;
            break;
        }
        default:
            virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
                        "Unknown scheduler", op_sys.u.getschedulerid.sched_id);
            return -1;
        }
    }

    return 0;
}
3031+
3032+
3033+int
3034+xenHypervisorDomainBlockStats (virDomainPtr dom,
3035+ const char *path,
3036+ struct _virDomainBlockStats *stats)
3037+{
3038+# ifdef __linux__
3039+ xenUnifiedPrivatePtr priv;
3040+ int ret;
3041+
3042+ priv = (xenUnifiedPrivatePtr) dom->conn->privateData;
3043+ xenUnifiedLock(priv);
3044+ /* Need to lock because it hits the xenstore handle :-( */
3045+ ret = xenLinuxDomainBlockStats (priv, dom, path, stats);
3046+ xenUnifiedUnlock(priv);
3047+ return ret;
3048+# else
3049+ virXenErrorFunc(VIR_ERR_NO_SUPPORT, __FUNCTION__,
3050+ "block statistics not supported on this platform",
3051+ dom->id);
3052+ return -1;
3053+# endif
3054+}
3055+
3056+/* Paths have the form vif<domid>.<n> (this interface checks that
3057+ * <domid> is the real domain ID and returns an error if not).
3058+ *
3059+ * In future we may allow you to query bridge stats (virbrX or
3060+ * xenbrX), but that will probably be through a separate
3061+ * virNetwork interface, as yet not decided.
3062+ */
3063+int
3064+xenHypervisorDomainInterfaceStats (virDomainPtr dom,
3065+ const char *path,
3066+ struct _virDomainInterfaceStats *stats)
3067+{
3068+# ifdef __linux__
3069+ int rqdomid, device;
3070+
3071+ /* Verify that the vif requested is one belonging to the current
3072+ * domain.
3073+ */
3074+ if (sscanf (path, "vif%d.%d", &rqdomid, &device) != 2) {
3075+ virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
3076+ "invalid path, should be vif<domid>.<n>.", 0);
3077+ return -1;
3078+ }
3079+ if (rqdomid != dom->id) {
3080+ virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
3081+ "invalid path, vif<domid> should match this domain ID", 0);
3082+ return -1;
3083+ }
3084+
3085+ return linuxDomainInterfaceStats(path, stats);
3086+# else
3087+ virXenErrorFunc(VIR_ERR_NO_SUPPORT, __FUNCTION__,
3088+ "/proc/net/dev: Interface not found", 0);
3089+ return -1;
3090+# endif
3091+}
3092+
3093+/**
3094+ * virXen_pausedomain:
3095+ * @handle: the hypervisor handle
3096+ * @id: the domain id
3097+ *
3098+ * Do a low level hypercall to pause the domain
3099+ *
3100+ * Returns 0 or -1 in case of failure
3101+ */
3102+static int
3103+virXen_pausedomain(int handle, int id)
3104+{
3105+ int ret = -1;
3106+
3107+ if (hypervisor_version > 1) {
3108+ xen_op_v2_dom op;
3109+
3110+ memset(&op, 0, sizeof(op));
3111+ op.cmd = XEN_V2_OP_PAUSEDOMAIN;
3112+ op.domain = (domid_t) id;
3113+ ret = xenHypervisorDoV2Dom(handle, &op);
3114+ } else if (hypervisor_version == 1) {
3115+ xen_op_v1 op;
3116+
3117+ memset(&op, 0, sizeof(op));
3118+ op.cmd = XEN_V1_OP_PAUSEDOMAIN;
3119+ op.u.domain.domain = (domid_t) id;
3120+ ret = xenHypervisorDoV1Op(handle, &op);
3121+ } else if (hypervisor_version == 0) {
3122+ xen_op_v0 op;
3123+
3124+ memset(&op, 0, sizeof(op));
3125+ op.cmd = XEN_V0_OP_PAUSEDOMAIN;
3126+ op.u.domain.domain = (domid_t) id;
3127+ ret = xenHypervisorDoV0Op(handle, &op);
3128+ }
3129+ return(ret);
3130+}
3131+
3132+/**
3133+ * virXen_unpausedomain:
3134+ * @handle: the hypervisor handle
3135+ * @id: the domain id
3136+ *
3137+ * Do a low level hypercall to unpause the domain
3138+ *
3139+ * Returns 0 or -1 in case of failure
3140+ */
3141+static int
3142+virXen_unpausedomain(int handle, int id)
3143+{
3144+ int ret = -1;
3145+
3146+ if (hypervisor_version > 1) {
3147+ xen_op_v2_dom op;
3148+
3149+ memset(&op, 0, sizeof(op));
3150+ op.cmd = XEN_V2_OP_UNPAUSEDOMAIN;
3151+ op.domain = (domid_t) id;
3152+ ret = xenHypervisorDoV2Dom(handle, &op);
3153+ } else if (hypervisor_version == 1) {
3154+ xen_op_v1 op;
3155+
3156+ memset(&op, 0, sizeof(op));
3157+ op.cmd = XEN_V1_OP_UNPAUSEDOMAIN;
3158+ op.u.domain.domain = (domid_t) id;
3159+ ret = xenHypervisorDoV1Op(handle, &op);
3160+ } else if (hypervisor_version == 0) {
3161+ xen_op_v0 op;
3162+
3163+ memset(&op, 0, sizeof(op));
3164+ op.cmd = XEN_V0_OP_UNPAUSEDOMAIN;
3165+ op.u.domain.domain = (domid_t) id;
3166+ ret = xenHypervisorDoV0Op(handle, &op);
3167+ }
3168+ return(ret);
3169+}
3170+
3171+/**
3172+ * virXen_destroydomain:
3173+ * @handle: the hypervisor handle
3174+ * @id: the domain id
3175+ *
3176+ * Do a low level hypercall to destroy the domain
3177+ *
3178+ * Returns 0 or -1 in case of failure
3179+ */
3180+static int
3181+virXen_destroydomain(int handle, int id)
3182+{
3183+ int ret = -1;
3184+
3185+ if (hypervisor_version > 1) {
3186+ xen_op_v2_dom op;
3187+
3188+ memset(&op, 0, sizeof(op));
3189+ op.cmd = XEN_V2_OP_DESTROYDOMAIN;
3190+ op.domain = (domid_t) id;
3191+ ret = xenHypervisorDoV2Dom(handle, &op);
3192+ } else if (hypervisor_version == 1) {
3193+ xen_op_v1 op;
3194+
3195+ memset(&op, 0, sizeof(op));
3196+ op.cmd = XEN_V1_OP_DESTROYDOMAIN;
3197+ op.u.domain.domain = (domid_t) id;
3198+ ret = xenHypervisorDoV1Op(handle, &op);
3199+ } else if (hypervisor_version == 0) {
3200+ xen_op_v0 op;
3201+
3202+ memset(&op, 0, sizeof(op));
3203+ op.cmd = XEN_V0_OP_DESTROYDOMAIN;
3204+ op.u.domain.domain = (domid_t) id;
3205+ ret = xenHypervisorDoV0Op(handle, &op);
3206+ }
3207+ return(ret);
3208+}
3209+
3210+/**
3211+ * virXen_setmaxmem:
3212+ * @handle: the hypervisor handle
3213+ * @id: the domain id
3214+ * @memory: the amount of memory in kilobytes
3215+ *
3216+ * Do a low level hypercall to change the max memory amount
3217+ *
3218+ * Returns 0 or -1 in case of failure
3219+ */
3220+static int
3221+virXen_setmaxmem(int handle, int id, unsigned long memory)
3222+{
3223+ int ret = -1;
3224+
3225+ if (hypervisor_version > 1) {
3226+ xen_op_v2_dom op;
3227+
3228+ memset(&op, 0, sizeof(op));
3229+ op.cmd = XEN_V2_OP_SETMAXMEM;
3230+ op.domain = (domid_t) id;
3231+ if (dom_interface_version < 5)
3232+ op.u.setmaxmem.maxmem = memory;
3233+ else
3234+ op.u.setmaxmemd5.maxmem = memory;
3235+ ret = xenHypervisorDoV2Dom(handle, &op);
3236+ } else if (hypervisor_version == 1) {
3237+ xen_op_v1 op;
3238+
3239+ memset(&op, 0, sizeof(op));
3240+ op.cmd = XEN_V1_OP_SETMAXMEM;
3241+ op.u.setmaxmem.domain = (domid_t) id;
3242+ op.u.setmaxmem.maxmem = memory;
3243+ ret = xenHypervisorDoV1Op(handle, &op);
3244+ } else if (hypervisor_version == 0) {
3245+ xen_op_v0 op;
3246+
3247+ memset(&op, 0, sizeof(op));
3248+ op.cmd = XEN_V0_OP_SETMAXMEM;
3249+ op.u.setmaxmem.domain = (domid_t) id;
3250+ op.u.setmaxmem.maxmem = memory;
3251+ ret = xenHypervisorDoV0Op(handle, &op);
3252+ }
3253+ return(ret);
3254+}
3255+
3256+/**
3257+ * virXen_setmaxvcpus:
3258+ * @handle: the hypervisor handle
3259+ * @id: the domain id
3260+ * @vcpus: the numbers of vcpus
3261+ *
3262+ * Do a low level hypercall to change the max vcpus amount
3263+ *
3264+ * Returns 0 or -1 in case of failure
3265+ */
3266+static int
3267+virXen_setmaxvcpus(int handle, int id, unsigned int vcpus)
3268+{
3269+ int ret = -1;
3270+
3271+ if (hypervisor_version > 1) {
3272+ xen_op_v2_dom op;
3273+
3274+ memset(&op, 0, sizeof(op));
3275+ op.cmd = XEN_V2_OP_SETMAXVCPU;
3276+ op.domain = (domid_t) id;
3277+ op.u.setmaxvcpu.maxvcpu = vcpus;
3278+ ret = xenHypervisorDoV2Dom(handle, &op);
3279+ } else if (hypervisor_version == 1) {
3280+ xen_op_v1 op;
3281+
3282+ memset(&op, 0, sizeof(op));
3283+ op.cmd = XEN_V1_OP_SETMAXVCPU;
3284+ op.u.setmaxvcpu.domain = (domid_t) id;
3285+ op.u.setmaxvcpu.maxvcpu = vcpus;
3286+ ret = xenHypervisorDoV1Op(handle, &op);
3287+ } else if (hypervisor_version == 0) {
3288+ xen_op_v0 op;
3289+
3290+ memset(&op, 0, sizeof(op));
3291+ op.cmd = XEN_V0_OP_SETMAXVCPU;
3292+ op.u.setmaxvcpu.domain = (domid_t) id;
3293+ op.u.setmaxvcpu.maxvcpu = vcpus;
3294+ ret = xenHypervisorDoV0Op(handle, &op);
3295+ }
3296+ return(ret);
3297+}
3298+
3299+/**
3300+ * virXen_setvcpumap:
3301+ * @handle: the hypervisor handle
3302+ * @id: the domain id
3303+ * @vcpu: the vcpu to map
3304+ * @cpumap: the bitmap for this vcpu
3305+ * @maplen: the size of the bitmap in bytes
3306+ *
3307+ * Do a low level hypercall to change the pinning for vcpu
3308+ *
3309+ * Returns 0 or -1 in case of failure
3310+ */
3311+static int
3312+virXen_setvcpumap(int handle, int id, unsigned int vcpu,
3313+ unsigned char * cpumap, int maplen)
3314+{
3315+ int ret = -1;
3316+ unsigned char *new = NULL;
3317+ unsigned char *bitmap = NULL;
3318+ uint32_t nr_cpus;
3319+
3320+ if (hypervisor_version > 1) {
3321+ xen_op_v2_dom op;
3322+
3323+ if (lock_pages(cpumap, maplen) < 0) {
3324+ virXenError(VIR_ERR_XEN_CALL, " locking");
3325+ return (-1);
3326+ }
3327+ memset(&op, 0, sizeof(op));
3328+ op.cmd = XEN_V2_OP_SETVCPUMAP;
3329+ op.domain = (domid_t) id;
3330+
3331+ /* The allocated memory to cpumap must be 'sizeof(uint64_t)' byte *
3332+ * for Xen, and also nr_cpus must be 'sizeof(uint64_t) * 8' */
3333+ if (maplen < 8) {
3334+ if (VIR_ALLOC_N(new, sizeof(uint64_t)) < 0) {
3335+ virReportOOMError();
3336+ return (-1);
3337+ }
3338+ memcpy(new, cpumap, maplen);
3339+ bitmap = new;
3340+ nr_cpus = sizeof(uint64_t) * 8;
3341+ } else {
3342+ bitmap = cpumap;
3343+ nr_cpus = maplen * 8;
3344+ }
3345+
3346+ if (dom_interface_version < 5) {
3347+ op.u.setvcpumap.vcpu = vcpu;
3348+ op.u.setvcpumap.cpumap.bitmap = bitmap;
3349+ op.u.setvcpumap.cpumap.nr_cpus = nr_cpus;
3350+ } else {
3351+ op.u.setvcpumapd5.vcpu = vcpu;
3352+ op.u.setvcpumapd5.cpumap.bitmap.v = bitmap;
3353+ op.u.setvcpumapd5.cpumap.nr_cpus = nr_cpus;
3354+ }
3355+ ret = xenHypervisorDoV2Dom(handle, &op);
3356+ VIR_FREE(new);
3357+
3358+ if (unlock_pages(cpumap, maplen) < 0) {
3359+ virXenError(VIR_ERR_XEN_CALL, " release");
3360+ ret = -1;
3361+ }
3362+ } else {
3363+ cpumap_t xen_cpumap; /* limited to 64 CPUs in old hypervisors */
3364+ uint64_t *pm = &xen_cpumap;
3365+ int j;
3366+
3367+ if ((maplen > (int)sizeof(cpumap_t)) || (sizeof(cpumap_t) & 7))
3368+ return (-1);
3369+
3370+ memset(pm, 0, sizeof(cpumap_t));
3371+ for (j = 0; j < maplen; j++)
3372+ *(pm + (j / 8)) |= cpumap[j] << (8 * (j & 7));
3373+
3374+ if (hypervisor_version == 1) {
3375+ xen_op_v1 op;
3376+
3377+ memset(&op, 0, sizeof(op));
3378+ op.cmd = XEN_V1_OP_SETVCPUMAP;
3379+ op.u.setvcpumap.domain = (domid_t) id;
3380+ op.u.setvcpumap.vcpu = vcpu;
3381+ op.u.setvcpumap.cpumap = xen_cpumap;
3382+ ret = xenHypervisorDoV1Op(handle, &op);
3383+ } else if (hypervisor_version == 0) {
3384+ xen_op_v0 op;
3385+
3386+ memset(&op, 0, sizeof(op));
3387+ op.cmd = XEN_V0_OP_SETVCPUMAP;
3388+ op.u.setvcpumap.domain = (domid_t) id;
3389+ op.u.setvcpumap.vcpu = vcpu;
3390+ op.u.setvcpumap.cpumap = xen_cpumap;
3391+ ret = xenHypervisorDoV0Op(handle, &op);
3392+ }
3393+ }
3394+ return(ret);
3395+}
3396+#endif /* !PROXY*/
3397+
/**
 * virXen_getvcpusinfo:
 * @handle: the hypervisor handle
 * @id: the domain id
 * @vcpu: the vcpu to query
 * @ipt: output structure filled with the vcpu's state, CPU time and
 *     current physical CPU
 * @cpumap: output bitmap receiving the vcpu's CPU affinity (may be NULL)
 * @maplen: the size of the bitmap in bytes
 *
 * Do a low level hypercall to retrieve information about a vcpu,
 * including (optionally) its CPU affinity map.
 *
 * Returns 0 or -1 in case of failure
 */
static int
virXen_getvcpusinfo(int handle, int id, unsigned int vcpu, virVcpuInfoPtr ipt,
                    unsigned char *cpumap, int maplen)
{
    int ret = -1;

    if (hypervisor_version > 1) {
        xen_op_v2_dom op;

        memset(&op, 0, sizeof(op));
        op.cmd = XEN_V2_OP_GETVCPUINFO;
        op.domain = (domid_t) id;
        /* The domctl payload layout changed with interface version 5. */
        if (dom_interface_version < 5)
            op.u.getvcpuinfo.vcpu = (uint16_t) vcpu;
        else
            op.u.getvcpuinfod5.vcpu = (uint16_t) vcpu;
        ret = xenHypervisorDoV2Dom(handle, &op);

        if (ret < 0)
            return(-1);
        ipt->number = vcpu;
        if (dom_interface_version < 5) {
            /* Map the online/running/blocked flags onto libvirt states;
             * "blocked" takes precedence when both flags are set. */
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running)
                    ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked)
                    ipt->state = VIR_VCPU_BLOCKED;
            } else
                ipt->state = VIR_VCPU_OFFLINE;

            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
        } else {
            if (op.u.getvcpuinfod5.online) {
                if (op.u.getvcpuinfod5.running)
                    ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfod5.blocked)
                    ipt->state = VIR_VCPU_BLOCKED;
            } else
                ipt->state = VIR_VCPU_OFFLINE;

            ipt->cpuTime = op.u.getvcpuinfod5.cpu_time;
            ipt->cpu = op.u.getvcpuinfod5.online ? (int)op.u.getvcpuinfod5.cpu : -1;
        }
        if ((cpumap != NULL) && (maplen > 0)) {
            /* A second hypercall fetches the affinity map; the output
             * buffer must be pinned while the hypervisor writes it. */
            if (lock_pages(cpumap, maplen) < 0) {
                virXenError(VIR_ERR_XEN_CALL, " locking");
                return (-1);
            }
            memset(cpumap, 0, maplen);
            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V2_OP_GETVCPUMAP;
            op.domain = (domid_t) id;
            if (dom_interface_version < 5) {
                op.u.getvcpumap.vcpu = vcpu;
                op.u.getvcpumap.cpumap.bitmap = cpumap;
                op.u.getvcpumap.cpumap.nr_cpus = maplen * 8;
            } else {
                op.u.getvcpumapd5.vcpu = vcpu;
                op.u.getvcpumapd5.cpumap.bitmap.v = cpumap;
                op.u.getvcpumapd5.cpumap.nr_cpus = maplen * 8;
            }
            ret = xenHypervisorDoV2Dom(handle, &op);
            if (unlock_pages(cpumap, maplen) < 0) {
                virXenError(VIR_ERR_XEN_CALL, " release");
                ret = -1;
            }
        }
    } else {
        int mapl = maplen;
        int cpu;

        /* Legacy interfaces carry the map inline, capped at cpumap_t. */
        if (maplen > (int)sizeof(cpumap_t))
            mapl = (int)sizeof(cpumap_t);

        if (hypervisor_version == 1) {
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V1_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV1Op(handle, &op);
            if (ret < 0)
                return(-1);
            ipt->number = vcpu;
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                /* Expand the inline 64-bit map into the caller's bitmap. */
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
        } else if (hypervisor_version == 0) {
            /* NOTE(review): this branch declares xen_op_v1 but issues the
             * v0 command/op — presumably the v0 and v1 op layouts are the
             * same typedef; confirm against the struct definitions. */
            xen_op_v1 op;

            memset(&op, 0, sizeof(op));
            op.cmd = XEN_V0_OP_GETVCPUINFO;
            op.u.getvcpuinfo.domain = (domid_t) id;
            op.u.getvcpuinfo.vcpu = vcpu;
            ret = xenHypervisorDoV0Op(handle, &op);
            if (ret < 0)
                return(-1);
            ipt->number = vcpu;
            if (op.u.getvcpuinfo.online) {
                if (op.u.getvcpuinfo.running) ipt->state = VIR_VCPU_RUNNING;
                if (op.u.getvcpuinfo.blocked) ipt->state = VIR_VCPU_BLOCKED;
            }
            else ipt->state = VIR_VCPU_OFFLINE;
            ipt->cpuTime = op.u.getvcpuinfo.cpu_time;
            ipt->cpu = op.u.getvcpuinfo.online ? (int)op.u.getvcpuinfo.cpu : -1;
            if ((cpumap != NULL) && (maplen > 0)) {
                for (cpu = 0; cpu < (mapl * 8); cpu++) {
                    if (op.u.getvcpuinfo.cpumap & ((uint64_t)1<<cpu))
                        VIR_USE_CPU(cpumap, cpu);
                }
            }
        }
    }
    return(ret);
}
3537+
3538+/**
3539+ * xenHypervisorInit:
3540+ *
3541+ * Initialize the hypervisor layer. Try to detect the kind of interface
3542+ * used i.e. pre or post changeset 10277
3543+ */
3544+int
3545+xenHypervisorInit(void)
3546+{
3547+ int fd, ret, cmd, errcode;
3548+ hypercall_t hc;
3549+ v0_hypercall_t v0_hc;
3550+ xen_getdomaininfo info;
3551+ virVcpuInfoPtr ipt = NULL;
3552+
3553+ if (initialized) {
3554+ if (hypervisor_version == -1)
3555+ return (-1);
3556+ return(0);
3557+ }
3558+ initialized = 1;
3559+ in_init = 1;
3560+
3561+ /* Compile regular expressions used by xenHypervisorGetCapabilities.
3562+ * Note that errors here are really internal errors since these
3563+ * regexps should never fail to compile.
3564+ */
3565+ errcode = regcomp (&flags_hvm_rec, flags_hvm_re, REG_EXTENDED);
3566+ if (errcode != 0) {
3567+ char error[100];
3568+ regerror (errcode, &flags_hvm_rec, error, sizeof error);
3569+ regfree (&flags_hvm_rec);
3570+ virXenError(VIR_ERR_INTERNAL_ERROR, "%s", error);
3571+ in_init = 0;
3572+ return -1;
3573+ }
3574+ errcode = regcomp (&flags_pae_rec, flags_pae_re, REG_EXTENDED);
3575+ if (errcode != 0) {
3576+ char error[100];
3577+ regerror (errcode, &flags_pae_rec, error, sizeof error);
3578+ regfree (&flags_pae_rec);
3579+ regfree (&flags_hvm_rec);
3580+ virXenError(VIR_ERR_INTERNAL_ERROR, "%s", error);
3581+ in_init = 0;
3582+ return -1;
3583+ }
3584+ errcode = regcomp (&xen_cap_rec, xen_cap_re, REG_EXTENDED);
3585+ if (errcode != 0) {
3586+ char error[100];
3587+ regerror (errcode, &xen_cap_rec, error, sizeof error);
3588+ regfree (&xen_cap_rec);
3589+ regfree (&flags_pae_rec);
3590+ regfree (&flags_hvm_rec);
3591+ virXenError(VIR_ERR_INTERNAL_ERROR, "%s", error);
3592+ in_init = 0;
3593+ return -1;
3594+ }
3595+
3596+ /* Xen hypervisor version detection begins. */
3597+ ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
3598+ if (ret < 0) {
3599+ hypervisor_version = -1;
3600+ return(-1);
3601+ }
3602+ fd = ret;
3603+
3604+ /*
3605+ * The size of the hypervisor call block changed July 2006
3606+ * this detect if we are using the new or old hypercall_t structure
3607+ */
3608+ hc.op = __HYPERVISOR_xen_version;
3609+ hc.arg[0] = (unsigned long) XENVER_version;
3610+ hc.arg[1] = 0;
3611+
3612+ cmd = IOCTL_PRIVCMD_HYPERCALL;
3613+ ret = ioctl(fd, cmd, (unsigned long) &hc);
3614+
3615+ if ((ret != -1) && (ret != 0)) {
3616+ DEBUG("Using new hypervisor call: %X", ret);
3617+ hv_version = ret;
3618+ xen_ioctl_hypercall_cmd = cmd;
3619+ goto detect_v2;
3620+ }
3621+
3622+#ifndef __sun
3623+ /*
3624+ * check if the old hypercall are actually working
3625+ */
3626+ v0_hc.op = __HYPERVISOR_xen_version;
3627+ v0_hc.arg[0] = (unsigned long) XENVER_version;
3628+ v0_hc.arg[1] = 0;
3629+ cmd = _IOC(_IOC_NONE, 'P', 0, sizeof(v0_hypercall_t));
3630+ ret = ioctl(fd, cmd, (unsigned long) &v0_hc);
3631+ if ((ret != -1) && (ret != 0)) {
3632+ DEBUG("Using old hypervisor call: %X", ret);
3633+ hv_version = ret;
3634+ xen_ioctl_hypercall_cmd = cmd;
3635+ hypervisor_version = 0;
3636+ goto done;
3637+ }
3638+#endif
3639+
3640+ /*
3641+ * we failed to make any hypercall
3642+ */
3643+
3644+ hypervisor_version = -1;
3645+ virXenError(VIR_ERR_XEN_CALL, " ioctl %lu",
3646+ (unsigned long) IOCTL_PRIVCMD_HYPERCALL);
3647+ close(fd);
3648+ in_init = 0;
3649+ return(-1);
3650+
3651+ detect_v2:
3652+ /*
3653+ * The hypercalls were refactored into 3 different section in August 2006
3654+ * Try to detect if we are running a version post 3.0.2 with the new ones
3655+ * or the old ones
3656+ */
3657+ hypervisor_version = 2;
3658+
3659+ if (VIR_ALLOC(ipt) < 0) {
3660+ virReportOOMError();
3661+ return(-1);
3662+ }
3663+ /* Currently consider RHEL5.0 Fedora7, xen-3.1, and xen-unstable */
3664+ sys_interface_version = 2; /* XEN_SYSCTL_INTERFACE_VERSION */
3665+ if (virXen_getdomaininfo(fd, 0, &info) == 1) {
3666+ /* RHEL 5.0 */
3667+ dom_interface_version = 3; /* XEN_DOMCTL_INTERFACE_VERSION */
3668+ if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
3669+ DEBUG0("Using hypervisor call v2, sys ver2 dom ver3");
3670+ goto done;
3671+ }
3672+ /* Fedora 7 */
3673+ dom_interface_version = 4; /* XEN_DOMCTL_INTERFACE_VERSION */
3674+ if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
3675+ DEBUG0("Using hypervisor call v2, sys ver2 dom ver4");
3676+ goto done;
3677+ }
3678+ }
3679+
3680+ sys_interface_version = 3; /* XEN_SYSCTL_INTERFACE_VERSION */
3681+ if (virXen_getdomaininfo(fd, 0, &info) == 1) {
3682+ /* xen-3.1 */
3683+ dom_interface_version = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
3684+ if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
3685+ DEBUG0("Using hypervisor call v2, sys ver3 dom ver5");
3686+ goto done;
3687+ }
3688+ }
3689+
3690+ sys_interface_version = 4; /* XEN_SYSCTL_INTERFACE_VERSION */
3691+ if (virXen_getdomaininfo(fd, 0, &info) == 1) {
3692+ /* Fedora 8 */
3693+ dom_interface_version = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
3694+ if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
3695+ DEBUG0("Using hypervisor call v2, sys ver4 dom ver5");
3696+ goto done;
3697+ }
3698+ }
3699+
3700+ sys_interface_version = 6; /* XEN_SYSCTL_INTERFACE_VERSION */
3701+ if (virXen_getdomaininfo(fd, 0, &info) == 1) {
3702+ /* Xen 3.2, Fedora 9 */
3703+ dom_interface_version = 5; /* XEN_DOMCTL_INTERFACE_VERSION */
3704+ if (virXen_getvcpusinfo(fd, 0, 0, ipt, NULL, 0) == 0){
3705+ DEBUG0("Using hypervisor call v2, sys ver6 dom ver5");
3706+ goto done;
3707+ }
3708+ }
3709+
3710+ /* Xen 4.0 */
3711+ sys_interface_version = 7; /* XEN_SYSCTL_INTERFACE_VERSION */
3712+ if (virXen_getdomaininfo(fd, 0, &info) == 1) {
3713+ dom_interface_version = 6; /* XEN_DOMCTL_INTERFACE_VERSION */
3714+ DEBUG0("Using hypervisor call v2, sys ver7 dom ver6");
3715+ goto done;
3716+ }
3717+
3718+ hypervisor_version = 1;
3719+ sys_interface_version = -1;
3720+ if (virXen_getdomaininfo(fd, 0, &info) == 1) {
3721+ DEBUG0("Using hypervisor call v1");
3722+ goto done;
3723+ }
3724+
3725+ /*
3726+ * we failed to make the getdomaininfolist hypercall
3727+ */
3728+
3729+ DEBUG0("Failed to find any Xen hypervisor method");
3730+ hypervisor_version = -1;
3731+ virXenError(VIR_ERR_XEN_CALL, " ioctl %lu",
3732+ (unsigned long)IOCTL_PRIVCMD_HYPERCALL);
3733+ close(fd);
3734+ in_init = 0;
3735+ VIR_FREE(ipt);
3736+ return(-1);
3737+
3738+ done:
3739+ close(fd);
3740+ in_init = 0;
3741+ VIR_FREE(ipt);
3742+ return(0);
3743+}
3744+
3745+/**
3746+ * xenHypervisorOpen:
3747+ * @conn: pointer to the connection block
3748+ * @name: URL for the target, NULL for local
3749+ * @flags: combination of virDrvOpenFlag(s)
3750+ *
3751+ * Connects to the Xen hypervisor.
3752+ *
3753+ * Returns 0 or -1 in case of error.
3754+ */
3755+virDrvOpenStatus
3756+xenHypervisorOpen(virConnectPtr conn,
3757+ virConnectAuthPtr auth ATTRIBUTE_UNUSED,
3758+ int flags ATTRIBUTE_UNUSED)
3759+{
3760+ int ret;
3761+ xenUnifiedPrivatePtr priv = (xenUnifiedPrivatePtr) conn->privateData;
3762+
3763+ if (initialized == 0)
3764+ if (xenHypervisorInit() == -1)
3765+ return -1;
3766+
3767+ priv->handle = -1;
3768+
3769+ ret = open(XEN_HYPERVISOR_SOCKET, O_RDWR);
3770+ if (ret < 0) {
3771+ virXenError(VIR_ERR_NO_XEN, "%s", XEN_HYPERVISOR_SOCKET);
3772+ return (-1);
3773+ }
3774+
3775+ priv->handle = ret;
3776+
3777+ return(0);
3778+}
3779+
3780+/**
3781+ * xenHypervisorClose:
3782+ * @conn: pointer to the connection block
3783+ *
3784+ * Close the connection to the Xen hypervisor.
3785+ *
3786+ * Returns 0 in case of success or -1 in case of error.
3787+ */
3788+int
3789+xenHypervisorClose(virConnectPtr conn)
3790+{
3791+ int ret;
3792+ xenUnifiedPrivatePtr priv;
3793+
3794+ if (conn == NULL)
3795+ return (-1);
3796+
3797+ priv = (xenUnifiedPrivatePtr) conn->privateData;
3798+
3799+ if (priv->handle < 0)
3800+ return -1;
3801+
3802+ ret = close(priv->handle);
3803+ if (ret < 0)
3804+ return (-1);
3805+
3806+ return (0);
3807+}
3808+
3809+
3810+/**
3811+ * xenHypervisorGetVersion:
3812+ * @conn: pointer to the connection block
3813+ * @hvVer: where to store the version
3814+ *
3815+ * Call the hypervisor to extracts his own internal API version
3816+ *
3817+ * Returns 0 in case of success, -1 in case of error
3818+ */
3819+int
3820+xenHypervisorGetVersion(virConnectPtr conn, unsigned long *hvVer)
3821+{
3822+ xenUnifiedPrivatePtr priv;
3823+
3824+ if (conn == NULL)
3825+ return -1;
3826+ priv = (xenUnifiedPrivatePtr) conn->privateData;
3827+ if (priv->handle < 0 || hvVer == NULL)
3828+ return (-1);
3829+ *hvVer = (hv_version >> 16) * 1000000 + (hv_version & 0xFFFF) * 1000;
3830+ return(0);
3831+}
3832+
/* One guest architecture flavour advertised by the hypervisor
 * capabilities; the parsing code keeps one entry per (model, hvm)
 * tuple and merges feature flags into it. */
struct guest_arch {
    const char *model;   /* canonical CPU model name, e.g. "i686", "x86_64" */
    int bits;            /* guest address width: 32 or 64 */
    int hvm;             /* non-zero for full virt (HVM), 0 for paravirt */
    int pae;             /* non-zero when PAE guests are supported */
    int nonpae;          /* non-zero when non-PAE guests are supported */
    int ia64_be;         /* non-zero when big-endian ia64 guests are supported */
};
3841+
3842+
/* Build a virCapsPtr describing the host and the guest architectures
 * collected by the caller.  All failure paths free @caps and return
 * NULL; allocation failures funnel through the no_memory label.  The
 * caller owns the returned capabilities object. */
static virCapsPtr
xenHypervisorBuildCapabilities(virConnectPtr conn,
                               const char *hostmachine,
                               int host_pae,
                               const char *hvm_type,
                               struct guest_arch *guest_archs,
                               int nr_guest_archs) {
    virCapsPtr caps;
    int i;
    /* hv_version packs major in the high 16 bits, minor in the low 16. */
    int hv_major = hv_version >> 16;
    int hv_minor = hv_version & 0xFFFF;

    if ((caps = virCapabilitiesNew(hostmachine, 1, 1)) == NULL)
        goto no_memory;

    /* 00:16:3e is the Xen-assigned OUI used for generated guest MACs. */
    virCapabilitiesSetMacPrefix(caps, (unsigned char[]){ 0x00, 0x16, 0x3e });

    /* Advertise host CPU virtualisation extension ("vmx"/"svm") if any. */
    if (hvm_type && STRNEQ(hvm_type, "") &&
        virCapabilitiesAddHostFeature(caps, hvm_type) < 0)
        goto no_memory;
    if (host_pae &&
        virCapabilitiesAddHostFeature(caps, "pae") < 0)
        goto no_memory;


    if (virCapabilitiesAddHostMigrateTransport(caps,
                                               "xenmigr") < 0)
        goto no_memory;


    /* NUMA topology is only queryable on sufficiently new sysctl ABIs. */
    if (sys_interface_version >= SYS_IFACE_MIN_VERS_NUMA) {
        if (xenDaemonNodeGetTopology(conn, caps) != 0) {
            virCapabilitiesFree(caps);
            return NULL;
        }
    }

    /* One <guest> element per collected (model, hvm) flavour. */
    for (i = 0; i < nr_guest_archs; ++i) {
        virCapsGuestPtr guest;
        char const *const xen_machines[] = {guest_archs[i].hvm ? "xenfv" : "xenpv"};
        virCapsGuestMachinePtr *machines;

        if ((machines = virCapabilitiesAllocMachines(xen_machines, 1)) == NULL)
            goto no_memory;

        /* AddGuest takes ownership of machines on success only, so it
         * must be freed explicitly on the failure path. */
        if ((guest = virCapabilitiesAddGuest(caps,
                                             guest_archs[i].hvm ? "hvm" : "xen",
                                             guest_archs[i].model,
                                             guest_archs[i].bits,
                                             (STREQ(hostmachine, "x86_64") ?
                                              "/usr/lib64/xen/bin/qemu-dm" :
                                              "/usr/lib/xen/bin/qemu-dm"),
                                             (guest_archs[i].hvm ?
                                              "/usr/lib/xen/boot/hvmloader" :
                                              NULL),
                                             1,
                                             machines)) == NULL) {
            virCapabilitiesFreeMachines(machines, 1);
            goto no_memory;
        }
        machines = NULL;

        if (virCapabilitiesAddGuestDomain(guest,
                                          "xen",
                                          NULL,
                                          NULL,
                                          0,
                                          NULL) == NULL)
            goto no_memory;

        if (guest_archs[i].pae &&
            virCapabilitiesAddGuestFeature(guest,
                                           "pae",
                                           1,
                                           0) == NULL)
            goto no_memory;

        if (guest_archs[i].nonpae &&
            virCapabilitiesAddGuestFeature(guest,
                                           "nonpae",
                                           1,
                                           0) == NULL)
            goto no_memory;

        if (guest_archs[i].ia64_be &&
            virCapabilitiesAddGuestFeature(guest,
                                           "ia64_be",
                                           1,
                                           0) == NULL)
            goto no_memory;

        if (guest_archs[i].hvm) {
            if (virCapabilitiesAddGuestFeature(guest,
                                               "acpi",
                                               1, 1) == NULL)
                goto no_memory;

            /* In Xen 3.1.0, APIC is always on and can't be toggled.
             * NOTE(review): the condition below only marks APIC as
             * non-toggleable for hv_major > 3, so 3.1.x itself still
             * gets toggle=1, contradicting this comment — confirm the
             * intended version test. */
            if (virCapabilitiesAddGuestFeature(guest,
                                               "apic",
                                               1,
                                               (hv_major > 3 &&
                                                hv_minor > 0 ?
                                                0 : 1)) == NULL)
                goto no_memory;
        }
    }

    caps->defaultConsoleTargetType = VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_XEN;

    return caps;

 no_memory:
    virCapabilitiesFree(caps);
    return NULL;
}
3959+
3960+#ifdef __sun
3961+
/* Solaris-only: probe CPU virtualisation capabilities via the cpuid
 * device.  On return *hvm is "vmx", "svm" or "" (no extension), *pae
 * reflects the PAE feature bit, and *longmode whether the kernel runs
 * 64-bit.  Returns 1 on success, 0 on failure (error already reported). */
static int
get_cpu_flags(virConnectPtr conn, const char **hvm, int *pae, int *longmode)
{
    struct {
        uint32_t r_eax, r_ebx, r_ecx, r_edx;
    } regs;

    char tmpbuf[20];
    int ret = 0;
    int fd;

    /* returns -1, errno 22 if in 32-bit mode */
    *longmode = (sysinfo(SI_ARCHITECTURE_64, tmpbuf, sizeof(tmpbuf)) != -1);

    /* Each pread at offset N returns the four registers after CPUID
     * with EAX=N. */
    if ((fd = open("/dev/cpu/self/cpuid", O_RDONLY)) == -1 ||
        pread(fd, &regs, sizeof(regs), 0) != sizeof(regs)) {
        virReportSystemError(errno, "%s", _("could not read CPU flags"));
        goto out;
    }

    *pae = 0;
    *hvm = "";

    /* The 12-byte vendor string is compared starting at r_ebx, so it
     * spans the r_ebx/r_ecx/r_edx fields in struct order — hence the
     * scrambled literals ("AuthenticAMD" / "GenuineIntel" with the
     * last two register chunks swapped). */
    if (STREQLEN((const char *)&regs.r_ebx, "AuthcAMDenti", 12)) {
        if (pread(fd, &regs, sizeof (regs), 0x80000001) == sizeof (regs)) {
            /* Read secure virtual machine bit (bit 2 of ECX feature ID) */
            if ((regs.r_ecx >> 2) & 1) {
                *hvm = "svm";
            }
            if ((regs.r_edx >> 6) & 1)
                *pae = 1;
        }
    } else if (STREQLEN((const char *)&regs.r_ebx, "GenuntelineI", 12)) {
        if (pread(fd, &regs, sizeof (regs), 0x00000001) == sizeof (regs)) {
            /* Read VMXE feature bit (bit 5 of ECX feature ID) */
            if ((regs.r_ecx >> 5) & 1)
                *hvm = "vmx";
            if ((regs.r_edx >> 6) & 1)
                *pae = 1;
        }
    }

    ret = 1;

out:
    if (fd != -1)
        close(fd);
    return ret;
}
4011+
4012+static virCapsPtr
4013+xenHypervisorMakeCapabilitiesSunOS(virConnectPtr conn)
4014+{
4015+ struct guest_arch guest_arches[32];
4016+ int i = 0;
4017+ virCapsPtr caps = NULL;
4018+ struct utsname utsname;
4019+ int pae, longmode;
4020+ const char *hvm;
4021+
4022+ if (!get_cpu_flags(conn, &hvm, &pae, &longmode))
4023+ return NULL;
4024+
4025+ /* Really, this never fails - look at the man-page. */
4026+ uname (&utsname);
4027+
4028+ guest_arches[i].model = "i686";
4029+ guest_arches[i].bits = 32;
4030+ guest_arches[i].hvm = 0;
4031+ guest_arches[i].pae = pae;
4032+ guest_arches[i].nonpae = !pae;
4033+ guest_arches[i].ia64_be = 0;
4034+ i++;
4035+
4036+ if (longmode) {
4037+ guest_arches[i].model = "x86_64";
4038+ guest_arches[i].bits = 64;
4039+ guest_arches[i].hvm = 0;
4040+ guest_arches[i].pae = 0;
4041+ guest_arches[i].nonpae = 0;
4042+ guest_arches[i].ia64_be = 0;
4043+ i++;
4044+ }
4045+
4046+ if (hvm[0] != '\0') {
4047+ guest_arches[i].model = "i686";
4048+ guest_arches[i].bits = 32;
4049+ guest_arches[i].hvm = 1;
4050+ guest_arches[i].pae = pae;
4051+ guest_arches[i].nonpae = 1;
4052+ guest_arches[i].ia64_be = 0;
4053+ i++;
4054+
4055+ if (longmode) {
4056+ guest_arches[i].model = "x86_64";
4057+ guest_arches[i].bits = 64;
4058+ guest_arches[i].hvm = 1;
4059+ guest_arches[i].pae = 0;
4060+ guest_arches[i].nonpae = 0;
4061+ guest_arches[i].ia64_be = 0;
4062+ i++;
4063+ }
4064+ }
4065+
4066+ if ((caps = xenHypervisorBuildCapabilities(conn,
4067+ utsname.machine,
4068+ pae, hvm,
4069+ guest_arches, i)) == NULL)
4070+ virReportOOMError();
4071+
4072+ return caps;
4073+}
4074+
4075+#endif /* __sun */
4076+
/**
 * xenHypervisorMakeCapabilitiesInternal:
 * @conn: pointer to the connection block
 * @hostmachine: host architecture name as reported by uname(2)
 * @cpuinfo: file handle containing /proc/cpuinfo data, or NULL
 * @capabilities: file handle containing /sys/hypervisor/properties/capabilities data, or NULL
 *
 * Parse host CPU flags and the hypervisor capabilities string into a
 * virCapsPtr.  Either file handle may be NULL, in which case the
 * corresponding information is simply omitted.
 *
 * Return the capabilities of this hypervisor, or NULL on error.
 */
virCapsPtr
xenHypervisorMakeCapabilitiesInternal(virConnectPtr conn,
                                      const char *hostmachine,
                                      FILE *cpuinfo, FILE *capabilities)
{
    char line[1024], *str, *token;
    regmatch_t subs[4];
    char *saveptr = NULL;
    int i;

    char hvm_type[4] = ""; /* "vmx" or "svm" (or "" if not in CPU). */
    int host_pae = 0;
    struct guest_arch guest_archs[32];
    int nr_guest_archs = 0;
    virCapsPtr caps = NULL;

    memset(guest_archs, 0, sizeof(guest_archs));

    /* /proc/cpuinfo: flags: Intel calls HVM "vmx", AMD calls it "svm".
     * It's not clear if this will work on IA64, let alone other
     * architectures and non-Linux. (XXX)
     */
    if (cpuinfo) {
        while (fgets (line, sizeof line, cpuinfo)) {
            /* flags_hvm_rec captures the extension name in subgroup 1. */
            if (regexec (&flags_hvm_rec, line, sizeof(subs)/sizeof(regmatch_t), subs, 0) == 0
                && subs[0].rm_so != -1) {
                if (virStrncpy(hvm_type,
                               &line[subs[1].rm_so],
                               subs[1].rm_eo-subs[1].rm_so,
                               sizeof(hvm_type)) == NULL)
                    return NULL;
            } else if (regexec (&flags_pae_rec, line, 0, NULL, 0) == 0)
                host_pae = 1;
        }
    }

    /* Most of the useful info is in /sys/hypervisor/properties/capabilities
     * which is documented in the code in xen-unstable.hg/xen/arch/.../setup.c.
     *
     * It is a space-separated list of supported guest architectures.
     *
     * For x86:
     *    TYP-VER-ARCH[p]
     *    ^   ^   ^    ^
     *    |   |   |    +-- PAE supported
     *    |   |   +------- x86_32 or x86_64
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
     *
     * For PPC this file appears to be always empty (?)
     *
     * For IA64:
     *    TYP-VER-ARCH[be]
     *    ^   ^   ^    ^
     *    |   |   |    +-- Big-endian supported
     *    |   |   +------- always "ia64"
     *    |   +----------- the version of Xen, eg. "3.0"
     *    +--------------- "xen" or "hvm" for para or full virt respectively
     */

    /* Expecting one line in this file - ignore any more. */
    if ((capabilities) && (fgets (line, sizeof line, capabilities))) {
        /* Split the line into tokens.  strtok_r is OK here because we "own"
         * this buffer.  Parse out the features from each token.
         */
        for (str = line, nr_guest_archs = 0;
             nr_guest_archs < sizeof guest_archs / sizeof guest_archs[0]
                 && (token = strtok_r (str, " ", &saveptr)) != NULL;
             str = NULL) {

            /* xen_cap_rec subgroups: 1 = TYP, 2 = ARCH, 3 = suffix. */
            if (regexec (&xen_cap_rec, token, sizeof subs / sizeof subs[0],
                         subs, 0) == 0) {
                int hvm = STRPREFIX(&token[subs[1].rm_so], "hvm");
                const char *model;
                int bits, pae = 0, nonpae = 0, ia64_be = 0;

                if (STRPREFIX(&token[subs[2].rm_so], "x86_32")) {
                    model = "i686";
                    bits = 32;
                    if (subs[3].rm_so != -1 &&
                        STRPREFIX(&token[subs[3].rm_so], "p"))
                        pae = 1;
                    else
                        nonpae = 1;
                }
                else if (STRPREFIX(&token[subs[2].rm_so], "x86_64")) {
                    model = "x86_64";
                    bits = 64;
                }
                else if (STRPREFIX(&token[subs[2].rm_so], "ia64")) {
                    model = "ia64";
                    bits = 64;
                    if (subs[3].rm_so != -1 &&
                        STRPREFIX(&token[subs[3].rm_so], "be"))
                        ia64_be = 1;
                }
                else if (STRPREFIX(&token[subs[2].rm_so], "powerpc64")) {
                    model = "ppc64";
                    bits = 64;
                } else {
                    /* XXX surely no other Xen archs exist  */
                    continue;
                }

                /* Search for existing matching (model,hvm) tuple */
                for (i = 0 ; i < nr_guest_archs ; i++) {
                    if (STREQ(guest_archs[i].model, model) &&
                        guest_archs[i].hvm == hvm) {
                        break;
                    }
                }

                /* Too many arch flavours - highly unlikely ! */
                if (i >= ARRAY_CARDINALITY(guest_archs))
                    continue;
                /* Didn't find a match, so create a new one */
                if (i == nr_guest_archs)
                    nr_guest_archs++;

                guest_archs[i].model = model;
                guest_archs[i].bits = bits;
                guest_archs[i].hvm = hvm;

                /* Careful not to overwrite a previous positive
                   setting with a negative one here - some archs
                   can do both pae & non-pae, but Xen reports
                   separately capabilities so we're merging archs */
                if (pae)
                    guest_archs[i].pae = pae;
                if (nonpae)
                    guest_archs[i].nonpae = nonpae;
                if (ia64_be)
                    guest_archs[i].ia64_be = ia64_be;
            }
        }
    }

    if ((caps = xenHypervisorBuildCapabilities(conn,
                                               hostmachine,
                                               host_pae,
                                               hvm_type,
                                               guest_archs,
                                               nr_guest_archs)) == NULL)
        goto no_memory;

    return caps;

 no_memory:
    virReportOOMError();
    virCapabilitiesFree(caps);
    return NULL;
}
4237+
4238+/**
4239+ * xenHypervisorMakeCapabilities:
4240+ *
4241+ * Return the capabilities of this hypervisor.
4242+ */
4243+virCapsPtr
4244+xenHypervisorMakeCapabilities(virConnectPtr conn)
4245+{
4246+#ifdef __sun
4247+ return xenHypervisorMakeCapabilitiesSunOS(conn);
4248+#else
4249+ virCapsPtr caps;
4250+ FILE *cpuinfo, *capabilities;
4251+ struct utsname utsname;
4252+
4253+ /* Really, this never fails - look at the man-page. */
4254+ uname (&utsname);
4255+
4256+ cpuinfo = fopen ("/proc/cpuinfo", "r");
4257+ if (cpuinfo == NULL) {
4258+ if (errno != ENOENT) {
4259+ virReportSystemError(errno,
4260+ _("cannot read file %s"),
4261+ "/proc/cpuinfo");
4262+ return NULL;
4263+ }
4264+ }
4265+
4266+ capabilities = fopen ("/sys/hypervisor/properties/capabilities", "r");
4267+ if (capabilities == NULL) {
4268+ if (errno != ENOENT) {
4269+ fclose(cpuinfo);
4270+ virReportSystemError(errno,
4271+ _("cannot read file %s"),
4272+ "/sys/hypervisor/properties/capabilities");
4273+ return NULL;
4274+ }
4275+ }
4276+
4277+ caps = xenHypervisorMakeCapabilitiesInternal(conn,
4278+ utsname.machine,
4279+ cpuinfo,
4280+ capabilities);
4281+
4282+ if (cpuinfo)
4283+ fclose(cpuinfo);
4284+ if (capabilities)
4285+ fclose(capabilities);
4286+
4287+ return caps;
4288+#endif /* __sun */
4289+}
4290+
4291+
4292+
4293+/**
4294+ * xenHypervisorGetCapabilities:
4295+ * @conn: pointer to the connection block
4296+ *
4297+ * Return the capabilities of this hypervisor.
4298+ */
4299+char *
4300+xenHypervisorGetCapabilities (virConnectPtr conn)
4301+{
4302+ xenUnifiedPrivatePtr priv = (xenUnifiedPrivatePtr) conn->privateData;
4303+ char *xml;
4304+
4305+ if (!(xml = virCapabilitiesFormatXML(priv->caps))) {
4306+ virReportOOMError();
4307+ return NULL;
4308+ }
4309+
4310+ return xml;
4311+}
4312+
4313+
4314+/**
4315+ * xenHypervisorNumOfDomains:
4316+ * @conn: pointer to the connection block
4317+ *
4318+ * Provides the number of active domains.
4319+ *
4320+ * Returns the number of domain found or -1 in case of error
4321+ */
4322+int
4323+xenHypervisorNumOfDomains(virConnectPtr conn)
4324+{
4325+ xen_getdomaininfolist dominfos;
4326+ int ret, nbids;
4327+ static int last_maxids = 2;
4328+ int maxids = last_maxids;
4329+ xenUnifiedPrivatePtr priv;
4330+
4331+ if (conn == NULL)
4332+ return -1;
4333+ priv = (xenUnifiedPrivatePtr) conn->privateData;
4334+ if (priv->handle < 0)
4335+ return (-1);
4336+
4337+ retry:
4338+ if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
4339+ virReportOOMError();
4340+ return(-1);
4341+ }
4342+
4343+ XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);
4344+
4345+ ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);
4346+
4347+ XEN_GETDOMAININFOLIST_FREE(dominfos);
4348+
4349+ if (ret < 0)
4350+ return (-1);
4351+
4352+ nbids = ret;
4353+ /* Can't possibly have more than 65,000 concurrent guests
4354+ * so limit how many times we try, to avoid increasing
4355+ * without bound & thus allocating all of system memory !
4356+ * XXX I'll regret this comment in a few years time ;-)
4357+ */
4358+ if (nbids == maxids) {
4359+ if (maxids < 65000) {
4360+ last_maxids *= 2;
4361+ maxids *= 2;
4362+ goto retry;
4363+ }
4364+ nbids = -1;
4365+ }
4366+ if ((nbids < 0) || (nbids > maxids))
4367+ return(-1);
4368+ return(nbids);
4369+}
4370+
4371+/**
4372+ * xenHypervisorListDomains:
4373+ * @conn: pointer to the connection block
4374+ * @ids: array to collect the list of IDs of active domains
4375+ * @maxids: size of @ids
4376+ *
4377+ * Collect the list of active domains, and store their ID in @maxids
4378+ *
4379+ * Returns the number of domain found or -1 in case of error
4380+ */
4381+int
4382+xenHypervisorListDomains(virConnectPtr conn, int *ids, int maxids)
4383+{
4384+ xen_getdomaininfolist dominfos;
4385+ int ret, nbids, i;
4386+ xenUnifiedPrivatePtr priv;
4387+
4388+ if (conn == NULL)
4389+ return -1;
4390+
4391+ priv = (xenUnifiedPrivatePtr) conn->privateData;
4392+ if (priv->handle < 0 ||
4393+ (ids == NULL) || (maxids < 0))
4394+ return (-1);
4395+
4396+ if (maxids == 0)
4397+ return(0);
4398+
4399+ if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
4400+ virReportOOMError();
4401+ return(-1);
4402+ }
4403+
4404+ XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);
4405+ memset(ids, 0, maxids * sizeof(int));
4406+
4407+ ret = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);
4408+
4409+ if (ret < 0) {
4410+ XEN_GETDOMAININFOLIST_FREE(dominfos);
4411+ return (-1);
4412+ }
4413+
4414+ nbids = ret;
4415+ if ((nbids < 0) || (nbids > maxids)) {
4416+ XEN_GETDOMAININFOLIST_FREE(dominfos);
4417+ return(-1);
4418+ }
4419+
4420+ for (i = 0;i < nbids;i++) {
4421+ ids[i] = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);
4422+ }
4423+
4424+ XEN_GETDOMAININFOLIST_FREE(dominfos);
4425+ return (nbids);
4426+}
4427+
4428+
4429+#ifndef PROXY
4430+char *
4431+xenHypervisorDomainGetOSType (virDomainPtr dom)
4432+{
4433+ xenUnifiedPrivatePtr priv;
4434+ xen_getdomaininfo dominfo;
4435+ char *ostype = NULL;
4436+
4437+ priv = (xenUnifiedPrivatePtr) dom->conn->privateData;
4438+ if (priv->handle < 0) {
4439+ virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
4440+ _("domain shut off or invalid"), 0);
4441+ return (NULL);
4442+ }
4443+
4444+ /* HV's earlier than 3.1.0 don't include the HVM flags in guests status*/
4445+ if (hypervisor_version < 2 ||
4446+ dom_interface_version < 4) {
4447+ virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
4448+ _("unsupported in dom interface < 4"), 0);
4449+ return (NULL);
4450+ }
4451+
4452+ XEN_GETDOMAININFO_CLEAR(dominfo);
4453+
4454+ if (virXen_getdomaininfo(priv->handle, dom->id, &dominfo) < 0) {
4455+ virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
4456+ _("cannot get domain details"), 0);
4457+ return (NULL);
4458+ }
4459+
4460+ if (XEN_GETDOMAININFO_DOMAIN(dominfo) != dom->id) {
4461+ virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
4462+ _("cannot get domain details"), 0);
4463+ return (NULL);
4464+ }
4465+
4466+ if (XEN_GETDOMAININFO_FLAGS(dominfo) & DOMFLAGS_HVM)
4467+ ostype = strdup("hvm");
4468+ else
4469+ ostype = strdup("linux");
4470+
4471+ if (ostype == NULL)
4472+ virReportOOMError();
4473+
4474+ return ostype;
4475+}
4476+
4477+int
4478+xenHypervisorHasDomain(virConnectPtr conn,
4479+ int id)
4480+{
4481+ xenUnifiedPrivatePtr priv;
4482+ xen_getdomaininfo dominfo;
4483+
4484+ priv = (xenUnifiedPrivatePtr) conn->privateData;
4485+ if (priv->handle < 0)
4486+ return 0;
4487+
4488+ XEN_GETDOMAININFO_CLEAR(dominfo);
4489+
4490+ if (virXen_getdomaininfo(priv->handle, id, &dominfo) < 0)
4491+ return 0;
4492+
4493+ if (XEN_GETDOMAININFO_DOMAIN(dominfo) != id)
4494+ return 0;
4495+
4496+ return 1;
4497+}
4498+
4499+virDomainPtr
4500+xenHypervisorLookupDomainByID(virConnectPtr conn,
4501+ int id)
4502+{
4503+ xenUnifiedPrivatePtr priv;
4504+ xen_getdomaininfo dominfo;
4505+ virDomainPtr ret;
4506+ char *name;
4507+
4508+ priv = (xenUnifiedPrivatePtr) conn->privateData;
4509+ if (priv->handle < 0)
4510+ return (NULL);
4511+
4512+ XEN_GETDOMAININFO_CLEAR(dominfo);
4513+
4514+ if (virXen_getdomaininfo(priv->handle, id, &dominfo) < 0)
4515+ return (NULL);
4516+
4517+ if (XEN_GETDOMAININFO_DOMAIN(dominfo) != id)
4518+ return (NULL);
4519+
4520+ xenUnifiedLock(priv);
4521+ name = xenStoreDomainGetName(conn, id);
4522+ xenUnifiedUnlock(priv);
4523+ if (!name)
4524+ return (NULL);
4525+
4526+ ret = virGetDomain(conn, name, XEN_GETDOMAININFO_UUID(dominfo));
4527+ if (ret)
4528+ ret->id = id;
4529+ VIR_FREE(name);
4530+ return ret;
4531+}
4532+
4533+
/* Look up an active domain by UUID: fetch the full domain info list
 * (growing the buffer geometrically while it may be truncated), scan
 * it for a matching UUID, then resolve the name via xenstore and wrap
 * the result in a virDomainPtr.  Returns NULL on any failure. */
virDomainPtr
xenHypervisorLookupDomainByUUID(virConnectPtr conn,
                                const unsigned char *uuid)
{
    xen_getdomaininfolist dominfos;
    xenUnifiedPrivatePtr priv;
    virDomainPtr ret;
    char *name;
    int maxids = 100, nids, i, id;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0)
        return (NULL);

 retry:
    /* Buffer is freed and re-allocated at double the size on retry. */
    if (!(XEN_GETDOMAININFOLIST_ALLOC(dominfos, maxids))) {
        virReportOOMError();
        return(NULL);
    }

    XEN_GETDOMAININFOLIST_CLEAR(dominfos, maxids);

    nids = virXen_getdomaininfolist(priv->handle, 0, maxids, &dominfos);

    if (nids < 0) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        return (NULL);
    }

    /* Can't possibly have more than 65,000 concurrent guests
     * so limit how many times we try, to avoid increasing
     * without bound & thus allocating all of system memory !
     * XXX I'll regret this comment in a few years time ;-)
     */
    if (nids == maxids) {
        XEN_GETDOMAININFOLIST_FREE(dominfos);
        if (maxids < 65000) {
            maxids *= 2;
            goto retry;
        }
        return (NULL);
    }

    /* Linear scan for the matching UUID. */
    id = -1;
    for (i = 0 ; i < nids ; i++) {
        if (memcmp(XEN_GETDOMAININFOLIST_UUID(dominfos, i), uuid, VIR_UUID_BUFLEN) == 0) {
            id = XEN_GETDOMAININFOLIST_DOMAIN(dominfos, i);
            break;
        }
    }
    XEN_GETDOMAININFOLIST_FREE(dominfos);

    if (id == -1)
        return (NULL);

    /* The xenstore name lookup requires the driver lock. */
    xenUnifiedLock(priv);
    name = xenStoreDomainGetName(conn, id);
    xenUnifiedUnlock(priv);
    if (!name)
        return (NULL);

    ret = virGetDomain(conn, name, uuid);
    if (ret)
        ret->id = id;
    VIR_FREE(name);
    return ret;
}
4601+#endif
4602+
4603+/**
4604+ * xenHypervisorGetMaxVcpus:
4605+ *
4606+ * Returns the maximum of CPU defined by Xen.
4607+ */
4608+int
4609+xenHypervisorGetMaxVcpus(virConnectPtr conn,
4610+ const char *type ATTRIBUTE_UNUSED)
4611+{
4612+ xenUnifiedPrivatePtr priv;
4613+
4614+ if (conn == NULL)
4615+ return -1;
4616+ priv = (xenUnifiedPrivatePtr) conn->privateData;
4617+ if (priv->handle < 0)
4618+ return (-1);
4619+
4620+ return MAX_VIRT_CPUS;
4621+}
4622+
4623+/**
4624+ * xenHypervisorGetDomMaxMemory:
4625+ * @conn: connection data
4626+ * @id: domain id
4627+ *
4628+ * Retrieve the maximum amount of physical memory allocated to a
4629+ * domain.
4630+ *
4631+ * Returns the memory size in kilobytes or 0 in case of error.
4632+ */
4633+unsigned long
4634+xenHypervisorGetDomMaxMemory(virConnectPtr conn, int id)
4635+{
4636+ xenUnifiedPrivatePtr priv;
4637+ xen_getdomaininfo dominfo;
4638+ int ret;
4639+
4640+ if (conn == NULL)
4641+ return 0;
4642+
4643+ priv = (xenUnifiedPrivatePtr) conn->privateData;
4644+ if (priv->handle < 0)
4645+ return 0;
4646+
4647+ if (kb_per_pages == 0) {
4648+ kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
4649+ if (kb_per_pages <= 0)
4650+ kb_per_pages = 4;
4651+ }
4652+
4653+ XEN_GETDOMAININFO_CLEAR(dominfo);
4654+
4655+ ret = virXen_getdomaininfo(priv->handle, id, &dominfo);
4656+
4657+ if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))
4658+ return (0);
4659+
4660+ return((unsigned long) XEN_GETDOMAININFO_MAX_PAGES(dominfo) * kb_per_pages);
4661+}
4662+
4663+#ifndef PROXY
4664+/**
4665+ * xenHypervisorGetMaxMemory:
4666+ * @domain: a domain object or NULL
4667+ *
4668+ * Retrieve the maximum amount of physical memory allocated to a
4669+ * domain. If domain is NULL, then this get the amount of memory reserved
4670+ * to Domain0 i.e. the domain where the application runs.
4671+ *
4672+ * Returns the memory size in kilobytes or 0 in case of error.
4673+ */
4674+static unsigned long ATTRIBUTE_NONNULL (1)
4675+xenHypervisorGetMaxMemory(virDomainPtr domain)
4676+{
4677+ xenUnifiedPrivatePtr priv;
4678+
4679+ if (domain->conn == NULL)
4680+ return 0;
4681+
4682+ priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
4683+ if (priv->handle < 0 || domain->id < 0)
4684+ return (0);
4685+
4686+ return(xenHypervisorGetDomMaxMemory(domain->conn, domain->id));
4687+}
4688+#endif
4689+
/**
 * xenHypervisorGetDomInfo:
 * @conn: connection data
 * @id: the domain ID
 * @info: the place where information should be stored
 *
 * Do an hypervisor call to get the related set of domain information,
 * mapping the raw DOMFLAGS_* state bits onto libvirt domain states and
 * converting page counts to kilobytes.
 *
 * Returns 0 in case of success, -1 in case of error.
 */
int
xenHypervisorGetDomInfo(virConnectPtr conn, int id, virDomainInfoPtr info)
{
    xenUnifiedPrivatePtr priv;
    xen_getdomaininfo dominfo;
    int ret;
    uint32_t domain_flags, domain_state, domain_shutdown_cause;

    /* Lazily compute the page-to-KiB factor, defaulting to 4 KiB pages. */
    if (kb_per_pages == 0) {
        kb_per_pages = sysconf(_SC_PAGESIZE) / 1024;
        if (kb_per_pages <= 0)
            kb_per_pages = 4;
    }

    if (conn == NULL)
        return -1;

    priv = (xenUnifiedPrivatePtr) conn->privateData;
    if (priv->handle < 0 || info == NULL)
        return (-1);

    memset(info, 0, sizeof(virDomainInfo));
    XEN_GETDOMAININFO_CLEAR(dominfo);

    ret = virXen_getdomaininfo(priv->handle, id, &dominfo);

    /* Fail when the hypercall errors or reports a different domain. */
    if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != id))
        return (-1);

    domain_flags = XEN_GETDOMAININFO_FLAGS(dominfo);
    domain_flags &= ~DOMFLAGS_HVM; /* Mask out HVM flags */
    domain_state = domain_flags & 0xFF; /* Mask out high bits */
    switch (domain_state) {
        case DOMFLAGS_DYING:
            info->state = VIR_DOMAIN_SHUTDOWN;
            break;
        case DOMFLAGS_SHUTDOWN:
            /* The domain is shutdown.  Determine the cause. */
            domain_shutdown_cause = domain_flags >> DOMFLAGS_SHUTDOWNSHIFT;
            switch (domain_shutdown_cause) {
                case SHUTDOWN_crash:
                    info->state = VIR_DOMAIN_CRASHED;
                    break;
                default:
                    info->state = VIR_DOMAIN_SHUTOFF;
            }
            break;
        case DOMFLAGS_PAUSED:
            info->state = VIR_DOMAIN_PAUSED;
            break;
        case DOMFLAGS_BLOCKED:
            info->state = VIR_DOMAIN_BLOCKED;
            break;
        case DOMFLAGS_RUNNING:
            info->state = VIR_DOMAIN_RUNNING;
            break;
        default:
            info->state = VIR_DOMAIN_NOSTATE;
    }

    /*
     * the API brings back the cpu time in nanoseconds,
     * convert to microseconds, same thing convert to
     * kilobytes from page counts
     */
    info->cpuTime = XEN_GETDOMAININFO_CPUTIME(dominfo);
    info->memory = XEN_GETDOMAININFO_TOT_PAGES(dominfo) * kb_per_pages;
    info->maxMem = XEN_GETDOMAININFO_MAX_PAGES(dominfo);
    /* UINT_MAX means "unlimited" and must not be scaled. */
    if(info->maxMem != UINT_MAX)
        info->maxMem *= kb_per_pages;
    info->nrVirtCpu = XEN_GETDOMAININFO_CPUCOUNT(dominfo);
    return (0);
}
4773+
4774+/**
4775+ * xenHypervisorGetDomainInfo:
4776+ * @domain: pointer to the domain block
4777+ * @info: the place where information should be stored
4778+ *
4779+ * Do an hypervisor call to get the related set of domain information.
4780+ *
4781+ * Returns 0 in case of success, -1 in case of error.
4782+ */
4783+int
4784+xenHypervisorGetDomainInfo(virDomainPtr domain, virDomainInfoPtr info)
4785+{
4786+ xenUnifiedPrivatePtr priv;
4787+
4788+ if (domain->conn == NULL)
4789+ return -1;
4790+
4791+ priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
4792+ if (priv->handle < 0 || info == NULL ||
4793+ (domain->id < 0))
4794+ return (-1);
4795+
4796+ return(xenHypervisorGetDomInfo(domain->conn, domain->id, info));
4797+
4798+}
4799+
4800+#ifndef PROXY
4801+/**
4802+ * xenHypervisorNodeGetCellsFreeMemory:
4803+ * @conn: pointer to the hypervisor connection
4804+ * @freeMems: pointer to the array of unsigned long long
4805+ * @startCell: index of first cell to return freeMems info on.
4806+ * @maxCells: Maximum number of cells for which freeMems information can
4807+ * be returned.
4808+ *
4809+ * This call returns the amount of free memory in one or more NUMA cells.
4810+ * The @freeMems array must be allocated by the caller and will be filled
4811+ * with the amount of free memory in kilobytes for each cell requested,
4812+ * starting with startCell (in freeMems[0]), up to either
4813+ * (startCell + maxCells), or the number of additional cells in the node,
4814+ * whichever is smaller.
4815+ *
4816+ * Returns the number of entries filled in freeMems, or -1 in case of error.
4817+ */
4818+int
4819+xenHypervisorNodeGetCellsFreeMemory(virConnectPtr conn, unsigned long long *freeMems,
4820+ int startCell, int maxCells)
4821+{
4822+ xen_op_v2_sys op_sys;
4823+ int i, j, ret;
4824+ xenUnifiedPrivatePtr priv;
4825+
4826+ if (conn == NULL) {
4827+ virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
4828+ "invalid argument", 0);
4829+ return -1;
4830+ }
4831+
4832+ priv = conn->privateData;
4833+
4834+ if (priv->nbNodeCells < 0) {
4835+ virXenErrorFunc(VIR_ERR_XEN_CALL, __FUNCTION__,
4836+ "cannot determine actual number of cells",0);
4837+ return(-1);
4838+ }
4839+
4840+ if ((maxCells < 1) || (startCell >= priv->nbNodeCells)) {
4841+ virXenErrorFunc(VIR_ERR_INVALID_ARG, __FUNCTION__,
4842+ "invalid argument", 0);
4843+ return -1;
4844+ }
4845+
4846+ /*
4847+ * Support only sys_interface_version >=4
4848+ */
4849+ if (sys_interface_version < SYS_IFACE_MIN_VERS_NUMA) {
4850+ virXenErrorFunc(VIR_ERR_XEN_CALL, __FUNCTION__,
4851+ "unsupported in sys interface < 4", 0);
4852+ return -1;
4853+ }
4854+
4855+ if (priv->handle < 0) {
4856+ virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
4857+ "priv->handle invalid", 0);
4858+ return -1;
4859+ }
4860+
4861+ memset(&op_sys, 0, sizeof(op_sys));
4862+ op_sys.cmd = XEN_V2_OP_GETAVAILHEAP;
4863+
4864+ for (i = startCell, j = 0;(i < priv->nbNodeCells) && (j < maxCells);i++,j++) {
4865+ if (sys_interface_version >= 5)
4866+ op_sys.u.availheap5.node = i;
4867+ else
4868+ op_sys.u.availheap.node = i;
4869+ ret = xenHypervisorDoV2Sys(priv->handle, &op_sys);
4870+ if (ret < 0) {
4871+ return(-1);
4872+ }
4873+ if (sys_interface_version >= 5)
4874+ freeMems[j] = op_sys.u.availheap5.avail_bytes;
4875+ else
4876+ freeMems[j] = op_sys.u.availheap.avail_bytes;
4877+ }
4878+ return (j);
4879+}
4880+
4881+
4882+/**
4883+ * xenHypervisorPauseDomain:
4884+ * @domain: pointer to the domain block
4885+ *
4886+ * Do an hypervisor call to pause the given domain
4887+ *
4888+ * Returns 0 in case of success, -1 in case of error.
4889+ */
4890+int
4891+xenHypervisorPauseDomain(virDomainPtr domain)
4892+{
4893+ int ret;
4894+ xenUnifiedPrivatePtr priv;
4895+
4896+ if (domain->conn == NULL)
4897+ return -1;
4898+
4899+ priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
4900+ if (priv->handle < 0 || domain->id < 0)
4901+ return (-1);
4902+
4903+ ret = virXen_pausedomain(priv->handle, domain->id);
4904+ if (ret < 0)
4905+ return (-1);
4906+ return (0);
4907+}
4908+
4909+/**
4910+ * xenHypervisorResumeDomain:
4911+ * @domain: pointer to the domain block
4912+ *
4913+ * Do an hypervisor call to resume the given domain
4914+ *
4915+ * Returns 0 in case of success, -1 in case of error.
4916+ */
4917+int
4918+xenHypervisorResumeDomain(virDomainPtr domain)
4919+{
4920+ int ret;
4921+ xenUnifiedPrivatePtr priv;
4922+
4923+ if (domain->conn == NULL)
4924+ return -1;
4925+
4926+ priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
4927+ if (priv->handle < 0 || domain->id < 0)
4928+ return (-1);
4929+
4930+ ret = virXen_unpausedomain(priv->handle, domain->id);
4931+ if (ret < 0)
4932+ return (-1);
4933+ return (0);
4934+}
4935+
4936+/**
4937+ * xenHypervisorDestroyDomain:
4938+ * @domain: pointer to the domain block
4939+ *
4940+ * Do an hypervisor call to destroy the given domain
4941+ *
4942+ * Returns 0 in case of success, -1 in case of error.
4943+ */
4944+int
4945+xenHypervisorDestroyDomain(virDomainPtr domain)
4946+{
4947+ int ret;
4948+ xenUnifiedPrivatePtr priv;
4949+
4950+ if (domain->conn == NULL)
4951+ return -1;
4952+
4953+ priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
4954+ if (priv->handle < 0 || domain->id < 0)
4955+ return (-1);
4956+
4957+ ret = virXen_destroydomain(priv->handle, domain->id);
4958+ if (ret < 0)
4959+ return (-1);
4960+ return (0);
4961+}
4962+
4963+/**
4964+ * xenHypervisorSetMaxMemory:
4965+ * @domain: pointer to the domain block
4966+ * @memory: the max memory size in kilobytes.
4967+ *
4968+ * Do an hypervisor call to change the maximum amount of memory used
4969+ *
4970+ * Returns 0 in case of success, -1 in case of error.
4971+ */
4972+int
4973+xenHypervisorSetMaxMemory(virDomainPtr domain, unsigned long memory)
4974+{
4975+ int ret;
4976+ xenUnifiedPrivatePtr priv;
4977+
4978+ if (domain->conn == NULL)
4979+ return -1;
4980+
4981+ priv = (xenUnifiedPrivatePtr) domain->conn->privateData;
4982+ if (priv->handle < 0 || domain->id < 0)
4983+ return (-1);
4984+
4985+ ret = virXen_setmaxmem(priv->handle, domain->id, memory);
4986+ if (ret < 0)
4987+ return (-1);
4988+ return (0);
4989+}
4990+#endif /* PROXY */
4991+
4992+#ifndef PROXY
4993+/**
4994+ * xenHypervisorSetVcpus:
4995+ * @domain: pointer to domain object
4996+ * @nvcpus: the new number of virtual CPUs for this domain
4997+ *
4998+ * Dynamically change the number of virtual CPUs used by the domain.
4999+ *
5000+ * Returns 0 in case of success, -1 in case of failure.
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches

to all changes: