From: Zhiyuan Shao <zyshao.maillist@gmail.com>
To: xen-devel@lists.xensource.com
Subject: Does Xen-4.0 + pvops kernel still supports PV guest?
Date: Fri, 17 Dec 2010 22:23:20 +0800 [thread overview]
Message-ID: <4D0B7258.1070608@gmail.com> (raw)
In-Reply-To: <AANLkTiloe7jgMO49i72sF0MDFmuHJJeysEBb0oLVNono@mail.gmail.com>
[-- Attachment #1: Type: text/plain, Size: 931 bytes --]
Hi all,
I installed Xen-4.0 on my Ubuntu desktop (Maverick Meerkat, 10.10)
following https://help.ubuntu.com/community/Xen, although I had to
replace GRUB 2 with the older GRUB legacy to get everything working.
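(In case it helps: the menu.lst entry amounts to a standard GRUB legacy
Xen stanza along the following lines; the hypervisor image name, kernel
and initrd paths, partition, and dom0 root device here are illustrative
guesses, only the dom0_mem/sched options mentioned further down are what
I actually pass.)

title Xen 4.0, Ubuntu 10.10 dom0
    root (hd0,0)
    kernel /boot/xen-4.0.gz dom0_mem=1024M sched=credit
    module /boot/vmlinuz-2.6.32.25 root=/dev/sda1 ro
    module /boot/initrd.img-2.6.32.25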
After rebooting, I tried to create a PV guest using the same method I
had used with Xen 3.4.2. However, I got the following messages and the
creation failed:
zhiyuan@zhiyuan-ThinkPad-R400:~/xen_work/images/rhel51-pv$ sudo xm cr -c
1-pv.cfg
[sudo] password for zhiyuan:
Using config file "./1-pv.cfg".
Error: (4, 'Out of memory', 'xc_dom_alloc_segment: segment ramdisk too
large (0x11575 > 0x4000 - 0x1b8f pages)\n')
zhiyuan@zhiyuan-ThinkPad-R400:~/xen_work/images/rhel51-pv$
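(Decoding the numbers in that error, assuming the usual 4 KiB page size:
  0x4000  = 16384 pages ~=  64MB, i.e. the "memory = 64" I give the guest,
  0x1b8f  =  7055 pages ~=  28MB, presumably already taken by the kernel image,
  0x11575 = 71029 pages ~= 278MB, what the ramdisk segment alone would need,
so the ramdisk is several times larger than the whole memory allocation.)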
The configuration file is attached to this email.
BTW, I booted the system with the Xen command line options "dom0_mem=1024M
sched=credit"; my box has 2GB of memory and an Intel(R) Core(TM)2 Duo
CPU P8600 @ 2.40GHz processor.
Thanks in advance!
Zhiyuan
[-- Attachment #2: 1-pv.cfg --]
[-- Type: text/plain, Size: 7626 bytes --]
# -*- mode: python; -*-
#============================================================================
# Python configuration setup for 'xm create'.
# This script sets the parameters used when a domain is created using 'xm create'.
# You use a separate script for each domain you want to create, or
# you can set the parameters for the domain on the xm command line.
#============================================================================
#----------------------------------------------------------------------------
# Kernel image file.
kernel = "/boot/vmlinuz-2.6.32.25"
# Optional ramdisk.
ramdisk = "/boot/initrd.img-2.6.32.25"
# The domain build function. Default is 'linux'.
#builder='linux'
# Initial memory allocation (in megabytes) for the new domain.
#
# WARNING: Creating a domain with insufficient memory may cause out-of-memory
# errors. The domain needs enough memory to boot its kernel and modules.
# Allocating less than 32MB is not recommended.
memory = 64
# A name for your domain. All domains must have different names.
name = "1-pv"
# 128-bit UUID for the domain. The default behavior is to generate a new UUID
# on each call to 'xm create'.
#uuid = "06ed00fe-1162-4fc4-b5d8-11993ee4a8b9"
# List of CPUs this domain is allowed to use; by default Xen picks them.
#cpus = "" # leave to Xen to pick
#cpus = "0" # all vcpus run on CPU0
#cpus = "0-3,5,^1" # all vcpus run on cpus 0,2,3,5
#cpus = ["2", "3"] # VCPU0 runs on CPU2, VCPU1 runs on CPU3
# Number of virtual CPUs to use; the default is 1.
vcpus = 2
#----------------------------------------------------------------------------
# Define network interfaces.
# By default, no network interfaces are configured. You may have one created
# with sensible defaults using an empty vif clause:
#
# vif = [ '' ]
#
# or optionally override backend, bridge, ip, mac, script, type, or vifname:
#
# vif = [ 'mac=00:16:3e:00:00:11, bridge=xenbr0' ]
#
# or more than one interface may be configured:
#
# vif = [ '', 'bridge=xenbr1' ]
#vif = [ 'ip=192.168.2.1, mac=00:16:3e:00:00:11, bridge=xenbr0' ]
#----------------------------------------------------------------------------
# Define the disk devices you want the domain to have access to, and
# what you want them accessible as.
# Each disk entry is of the form phy:UNAME,DEV,MODE
# where UNAME is the device, DEV is the device name the domain will see,
# and MODE is r for read-only, w for read-write.
disk = [ 'file:/home/zhiyuan/xen_work/images/rhel51-pv/vm1disk,sda1,w' ]
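# The entry above uses a loopback file image. For a physical partition or
# LVM volume the same line would use the phy: prefix described above
# (the device name below is just a hypothetical example):
#disk = [ 'phy:/dev/VolGroup00/vm1disk,sda1,w' ]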
#----------------------------------------------------------------------------
# Define frame buffer device.
#
# By default, no frame buffer device is configured.
#
# To create one using the SDL backend and sensible defaults:
#
# vfb = [ 'type=sdl' ]
#
# This uses environment variables XAUTHORITY and DISPLAY. You
# can override that:
#
# vfb = [ 'type=sdl,xauthority=/home/bozo/.Xauthority,display=:1' ]
#
# To create one using the VNC backend and sensible defaults:
#
# vfb = [ 'type=vnc' ]
#
# The backend listens on 127.0.0.1 port 5900+N by default, where N is
# the domain ID. You can override both address and N:
#
# vfb = [ 'type=vnc,vnclisten=127.0.0.1,vncdisplay=1' ]
#
# Or you can bind the first unused port above 5900:
#
# vfb = [ 'type=vnc,vnclisten=0.0.0.0,vncunused=1' ]
#
# You can override the password:
#
# vfb = [ 'type=vnc,vncpasswd=MYPASSWD' ]
#
# Empty password disables authentication. Defaults to the vncpasswd
# configured in xend-config.sxp.
#----------------------------------------------------------------------------
# Define which TPM instance the user domain should communicate with.
# The vtpm entry is of the form 'instance=INSTANCE,backend=DOM'
# where INSTANCE indicates the instance number of the TPM the VM
# should be talking to and DOM provides the domain where the backend
# is located.
# Note that no two virtual machines should try to connect to the same
# TPM instance. Managing TPM instances requires some effort, in that a VM
# configuration file (and thus the VM) should stay associated with the same
# TPM instance throughout the lifetime of the VM / VM configuration file.
# The instance number must be greater than or equal to 1.
#vtpm = [ 'instance=1,backend=0' ]
#----------------------------------------------------------------------------
# Set the kernel command line for the new domain.
# You only need to define the IP parameters and hostname if the domain's
# own IP configuration doesn't set them, e.g. via ifcfg-eth0 or DHCP.
# You can use 'extra' to set the runlevel and custom environment
# variables used by custom rc scripts (e.g. VMID=, usr= ).
# Set if you want dhcp to allocate the IP address.
#dhcp="dhcp"
# Set netmask.
#netmask=
# Set default gateway.
#gateway=
# Set the hostname.
#hostname= "vm%d" % vmid
# Set root device.
root = "/dev/sda1 ro"
# Root device for nfs.
#root = "/dev/nfs"
# The nfs server.
#nfs_server = '192.0.2.1'
# Root directory on the nfs server.
#nfs_root = '/full/path/to/root/directory'
# Sets runlevel 4.
extra = "4"
#----------------------------------------------------------------------------
# Configure the behaviour when a domain exits. There are three 'reasons'
# for a domain to stop: poweroff, reboot, and crash. For each of these you
# may specify:
#
# "destroy", meaning that the domain is cleaned up as normal;
# "restart", meaning that a new domain is started in place of the old
# one;
# "preserve", meaning that no clean-up is done until the domain is
# manually destroyed (using xm destroy, for example); or
# "rename-restart", meaning that the old domain is not cleaned up, but is
# renamed and a new domain started in its place.
#
# In the event a domain stops due to a crash, you have the additional options:
#
# "coredump-destroy", meaning dump the crashed domain's core and then destroy;
# "coredump-restart', meaning dump the crashed domain's core and the restart.
#
# The default is
#
# on_poweroff = 'destroy'
# on_reboot = 'restart'
# on_crash = 'restart'
#
# For backwards compatibility we also support the deprecated option restart
#
# restart = 'onreboot' means on_poweroff = 'destroy'
#                            on_reboot   = 'restart'
#                            on_crash    = 'destroy'
#
# restart = 'always'   means on_poweroff = 'restart'
#                            on_reboot   = 'restart'
#                            on_crash    = 'restart'
#
# restart = 'never'    means on_poweroff = 'destroy'
#                            on_reboot   = 'destroy'
#                            on_crash    = 'destroy'
#on_poweroff = 'destroy'
#on_reboot = 'restart'
#on_crash = 'restart'
#-----------------------------------------------------------------------------
# Configure PVSCSI devices:
#
#vscsi=[ 'PDEV, VDEV' ]
#
# PDEV gives the physical SCSI device to be attached to the specified guest
# domain, using one of the following identifier formats:
# - XX:XX:XX:XX (a 4-tuple in decimal notation, meaning
#   "host:channel:target:lun")
# - /dev/sdxx or sdx
# - /dev/stxx or stx
# - /dev/sgxx or sgx
# - result of 'scsi_id -gu -s'.
# ex. # scsi_id -gu -s /block/sdb
# 36000b5d0006a0000006a0257004c0000
#
# VDEV gives the virtual SCSI device as a 4-tuple (XX:XX:XX:XX) under
# which the specified guest domain will recognize the device.
#
#vscsi = [ '/dev/sdx, 0:0:0:0' ]
#============================================================================
[-- Attachment #3: Type: text/plain, Size: 138 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
Thread overview: 12+ messages
2010-07-08 21:14 Questions regarding Xen Credit Scheduler Gaurav Dhiman
2010-07-09 12:13 ` George Dunlap
2010-07-10 9:21 ` Gaurav Dhiman
2010-07-12 11:05 ` George Dunlap
2010-07-16 0:41 ` Gaurav Dhiman
2010-07-16 9:13 ` George Dunlap
2010-07-16 11:04 ` George Dunlap
2010-12-17 14:23 ` Zhiyuan Shao [this message]
2010-12-17 14:36 ` Does Xen-4.0 + pvops kernel still supports PV guest? Ian Campbell
2010-12-17 14:51 ` Zhiyuan Shao
2010-12-17 15:07 ` Ian Campbell
2010-12-17 14:25 ` Questions regarding Xen Credit Scheduler Zhiyuan Shao