[Spice-commits] 25 commits - Makefile VERSION fpu/softfloat.c hw/i386 hw/pci-host hw/ppc include/fpu include/hw pc-bios/README pc-bios/slof.bin qga/vss-win32 roms/SLOF target-ppc/cpu.h target-ppc/excp_helper.c target-ppc/fpu_helper.c target-ppc/helper_regs.h target-ppc/translate_init.c ui/gtk.c

Gerd Hoffmann kraxel at kemper.freedesktop.org
Tue Apr 8 23:38:17 PDT 2014


 Makefile                          |    1 
 VERSION                           |    2 
 fpu/softfloat.c                   |   20 +
 hw/i386/acpi-dsdt-cpu-hotplug.dsl |    2 
 hw/pci-host/prep.c                |    8 
 hw/ppc/e500.c                     |    2 
 hw/ppc/ppc.c                      |   92 ++++---
 hw/ppc/ppce500_spin.c             |    6 
 include/fpu/softfloat.h           |    1 
 include/hw/ppc/ppc.h              |    3 
 pc-bios/README                    |    2 
 pc-bios/slof.bin                  |binary
 qga/vss-win32/install.cpp         |    3 
 roms/SLOF                         |    2 
 target-ppc/cpu.h                  |    1 
 target-ppc/excp_helper.c          |    5 
 target-ppc/fpu_helper.c           |  494 ++++++++++++++++++--------------------
 target-ppc/helper_regs.h          |    2 
 target-ppc/translate_init.c       |    8 
 ui/gtk.c                          |   19 +
 20 files changed, 377 insertions(+), 296 deletions(-)

New commits:
commit efcc87d9aedb590b8506cd1a7c8abe557c760f9e
Author: Peter Maydell <peter.maydell at linaro.org>
Date:   Tue Apr 8 18:52:06 2014 +0100

    Update version for v2.0.0-rc2 release
    
    Signed-off-by: Peter Maydell <peter.maydell at linaro.org>

diff --git a/VERSION b/VERSION
index 0ff4310..c9ba51a 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.7.91
+1.7.92
commit 7dc176bce49c61551b513508def136d5bb632b72
Author: Peter Maydell <peter.maydell at linaro.org>
Date:   Tue Apr 8 16:51:11 2014 +0100

    hw/pci-host/prep: Don't reverse IO accesses on bigendian hosts
    
    The raven_io_read() and raven_io_write() functions pass and
    return values in little-endian format (since the IO op struct
    is marked DEVICE_LITTLE_ENDIAN); however they were storing the
    values in the buffer to pass to address_space_read/write()
    in host-endian order, which meant that on big-endian hosts
    the values were inadvertently reversed. Use the *_le_p()
    accessors instead so that we are consistent regardless of
    host endianness.
    
    Strictly speaking the byte order of the buffer for
    address_space_rw() is target byte order (which for PPC
    will be BE) but it doesn't actually matter as long as we
    are consistent about the marking on the IO op struct and
    which stl_*_p() variant we use.
    
    This bug was probably introduced due to confusion caused by
    the two different versions of ldl_p() and friends:
     bswap.h defines versions meaning "host endianness access";
     cpu-all.h defines versions meaning "target endianness access".
    As a target-independent source file, prep.c gets the bswap.h
    versions; the very similar looking code in ioport.c is
    compiled per-target and gets the cpu-all.h versions.
    
    Signed-off-by: Peter Maydell <peter.maydell at linaro.org>
    Message-id: 1396972271-22660-1-git-send-email-peter.maydell at linaro.org
    Reviewed-by: Richard Henderson <rth at twiddle.net>

diff --git a/hw/pci-host/prep.c b/hw/pci-host/prep.c
index d3e746c..4014540 100644
--- a/hw/pci-host/prep.c
+++ b/hw/pci-host/prep.c
@@ -145,9 +145,9 @@ static uint64_t raven_io_read(void *opaque, hwaddr addr,
     if (size == 1) {
         return buf[0];
     } else if (size == 2) {
-        return lduw_p(buf);
+        return lduw_le_p(buf);
     } else if (size == 4) {
-        return ldl_p(buf);
+        return ldl_le_p(buf);
     } else {
         g_assert_not_reached();
     }
@@ -164,9 +164,9 @@ static void raven_io_write(void *opaque, hwaddr addr,
     if (size == 1) {
         buf[0] = val;
     } else if (size == 2) {
-        stw_p(buf, val);
+        stw_le_p(buf, val);
     } else if (size == 4) {
-        stl_p(buf, val);
+        stl_le_p(buf, val);
     } else {
         g_assert_not_reached();
     }
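
The distinction the commit message draws between the two accessor families can
be illustrated with a minimal standalone sketch (not the QEMU code; the my_*
names are invented here): a host-endian load gives different results on BE and
LE hosts, while an explicit little-endian load is host-independent.

    #include <stdint.h>
    #include <string.h>

    /* Host-endian 32-bit load: the result depends on the host's byte order. */
    static inline uint32_t my_ldl_he_p(const void *p)
    {
        uint32_t v;
        memcpy(&v, p, sizeof(v));
        return v;
    }

    /* Little-endian 32-bit load: same result on any host. */
    static inline uint32_t my_ldl_le_p(const void *p)
    {
        const uint8_t *b = p;
        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
    }
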
commit 9bc1a1d817670702f642633a325da346047f7508
Merge: 093de72 f2ccc31
Author: Peter Maydell <peter.maydell at linaro.org>
Date:   Tue Apr 8 13:59:28 2014 +0100

    Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
    
    acpi bug fix
    
    Here is a single last-minute fix for 2.0
    
    This changes the HID of the container used to claim
    resources for CPU hotplug.
    As a result, Windows XP SP3 no longer brings up
    an annoying "found new hardware" wizard on boot.
    
    Signed-off-by: Michael S. Tsirkin <mst at redhat.com>
    
    # gpg: Signature made Tue 08 Apr 2014 13:23:30 BST using RSA key ID D28D5469
    # gpg: Good signature from "Michael S. Tsirkin <mst at kernel.org>"
    # gpg:                 aka "Michael S. Tsirkin <mst at redhat.com>"
    
    * remotes/mst/tags/for_upstream:
      dsdt: tweak ACPI ID for hotplug resource device
    
    Signed-off-by: Peter Maydell <peter.maydell at linaro.org>

commit f2ccc311df55ec026a8f8ea9df998f26314f22b2
Author: Michael S. Tsirkin <mst at redhat.com>
Date:   Sun Apr 6 12:47:37 2014 +0300

    dsdt: tweak ACPI ID for hotplug resource device
    
    ACPI0004 seems too new:
    Windows XP complains about an unrecognized device.
    This is a regression since 1.7.
    Use PNP0A06 instead - Generic Container Device.
    
    Signed-off-by: Michael S. Tsirkin <mst at redhat.com>
    Reviewed-By: Igor Mammedov <imammedo at redhat.com>

diff --git a/hw/i386/acpi-dsdt-cpu-hotplug.dsl b/hw/i386/acpi-dsdt-cpu-hotplug.dsl
index dee4843..34aab5a 100644
--- a/hw/i386/acpi-dsdt-cpu-hotplug.dsl
+++ b/hw/i386/acpi-dsdt-cpu-hotplug.dsl
@@ -93,7 +93,7 @@ Scope(\_SB) {
     }
 
     Device(CPU_HOTPLUG_RESOURCE_DEVICE) {
-        Name(_HID, "ACPI0004")
+        Name(_HID, EisaId("PNP0A06"))
 
         Name(_CRS, ResourceTemplate() {
             IO(Decode16, CPU_STATUS_BASE, CPU_STATUS_BASE, 0, CPU_STATUS_LEN)
commit 093de72b9c226fe007f330c70a0d4ccb0baec17d
Merge: 9a4fb6a 800b0e8
Author: Peter Maydell <peter.maydell at linaro.org>
Date:   Tue Apr 8 13:05:25 2014 +0100

    Merge remote-tracking branch 'remotes/kraxel/tags/pull-gtk-5' into staging
    
    gtk: Implement grab-on-click behavior in relative mode
    
    # gpg: Signature made Tue 08 Apr 2014 12:58:49 BST using RSA key ID D3E87138
    # gpg: Good signature from "Gerd Hoffmann (work) <kraxel at redhat.com>"
    # gpg:                 aka "Gerd Hoffmann <gerd at kraxel.org>"
    # gpg:                 aka "Gerd Hoffmann (private) <kraxel at gmail.com>"
    
    * remotes/kraxel/tags/pull-gtk-5:
      gtk: Implement grab-on-click behavior in relative mode
    
    Signed-off-by: Peter Maydell <peter.maydell at linaro.org>

commit 800b0e814bef7cd14ae2bce149c09d70676e93fb
Author: Takashi Iwai <tiwai at suse.de>
Date:   Tue Apr 8 11:26:45 2014 +0200

    gtk: Implement grab-on-click behavior in relative mode
    
    This patch changes the behavior in the relative mode to be compatible
    with other UIs, namely, grabbing the input at the first left click.
    It improves the usability a lot; otherwise you have to press ctrl-alt-G
    or select from the menu each time you want to move the pointer.  Also,
    the input grab is cleared when the current mode is switched to the
    absolute mode.
    
    The automatic reset of the implicit grab is needed because the switch
    to absolute mode always happens after the click event in Gtk.  That is,
    we cannot check whether absolute mode is already available at the time
    of the first click, even though it should already have been switched on
    the X11 input driver side.
    
    Signed-off-by: Takashi Iwai <tiwai at suse.de>
    Signed-off-by: Gerd Hoffmann <kraxel at redhat.com>

diff --git a/ui/gtk.c b/ui/gtk.c
index 6668bd8..00fbbcc 100644
--- a/ui/gtk.c
+++ b/ui/gtk.c
@@ -476,8 +476,15 @@ static void gd_change_runstate(void *opaque, int running, RunState state)
 
 static void gd_mouse_mode_change(Notifier *notify, void *data)
 {
-    gd_update_cursor(container_of(notify, GtkDisplayState, mouse_mode_notifier),
-                     FALSE);
+    GtkDisplayState *s;
+
+    s = container_of(notify, GtkDisplayState, mouse_mode_notifier);
+    /* release the grab at switching to absolute mode */
+    if (qemu_input_is_absolute() && gd_is_grab_active(s)) {
+        gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(s->grab_item),
+                                       FALSE);
+    }
+    gd_update_cursor(s, FALSE);
 }
 
 /** GTK Events **/
@@ -685,6 +692,14 @@ static gboolean gd_button_event(GtkWidget *widget, GdkEventButton *button,
     GtkDisplayState *s = opaque;
     InputButton btn;
 
+    /* implicitly grab the input at the first click in the relative mode */
+    if (button->button == 1 && button->type == GDK_BUTTON_PRESS &&
+        !qemu_input_is_absolute() && !gd_is_grab_active(s)) {
+        gtk_check_menu_item_set_active(GTK_CHECK_MENU_ITEM(s->grab_item),
+                                       TRUE);
+        return TRUE;
+    }
+
     if (button->button == 1) {
         btn = INPUT_BUTTON_LEFT;
     } else if (button->button == 2) {
commit 9a4fb6aa19d1aa8dfb3abb6210734b1a1df9e322
Merge: e792933 06f6e12
Author: Peter Maydell <peter.maydell at linaro.org>
Date:   Tue Apr 8 10:58:31 2014 +0100

    Merge remote-tracking branch 'remotes/agraf/tags/signed-ppc-for-upstream' into staging
    
    Patch queue for ppc - 2014-04-08
    
    This is the final queue for 2.0! It fixes a lot of bugs people have
    seen during testing:
    
      - Fix e500 SMP
      - Fix book3s_64 DEC
      - Fix VSX (new feature in 2.0) for LE hosts
      - Fix PR KVM on top of pHyp (SLOF update)
    
    # gpg: Signature made Tue 08 Apr 2014 10:24:18 BST using RSA key ID 03FEDC60
    # gpg: Can't check signature: public key not found
    
    * remotes/agraf/tags/signed-ppc-for-upstream:
      PPC: Add l1 cache sizes for 970 and above systems
      ppce500_spin: Initialize struct properly
      PPC: Only enter MSR_POW when no interrupts pending
      PPC: Clean up DECR implementation
      target-ppc: Correct VSX Integer to FP Conversion
      target-ppc: Correct VSX FP to Integer Conversion
      target-ppc: Correct VSX FP to FP Conversions
      target-ppc: Correct VSX Scalar Compares
      target-ppc: Correct Simple VSR LE Host Inversions
      target-ppc: Correct LE Host Inversion of Lower VSRs
      target-ppc: Define Endian-Correct Accessors for VSR Field Access
      target-ppc: Bug: VSX Convert to Integer Should Truncate
      softfloat: Introduce float32_to_uint64_round_to_zero
      pseries: Update SLOF firmware image to qemu-slof-20140404
      PPC: E500: Set PIR default reset value rather than SPR value
    
    Signed-off-by: Peter Maydell <peter.maydell at linaro.org>

commit e792933ce1a9229d01b00f02caa39c39c30bcce8
Merge: 55519a4 9854202
Author: Peter Maydell <peter.maydell at linaro.org>
Date:   Tue Apr 8 10:41:30 2014 +0100

    Merge remote-tracking branch 'remotes/mdroth/qga-pull-2014-4-7' into staging
    
    * remotes/mdroth/qga-pull-2014-4-7:
      vss-win32: Fix build with mingw64-headers-3.1.0
      Makefile: add qga-vss-dll-obj-y to nested variables
    
    Signed-off-by: Peter Maydell <peter.maydell at linaro.org>

commit 06f6e12491fd767b3b23573c438f925f6092e897
Author: Alexander Graf <agraf at suse.de>
Date:   Tue Apr 8 01:42:53 2014 +0200

    PPC: Add l1 cache sizes for 970 and above systems
    
    Book3s_64 guests expect the L1 cache size in the device tree, so let's give
    them proper values for all CPU types we support.
    
    This fixes a "not compliant" warning with sles11 guests on -M pseries for me.
    
    Signed-off-by: Alexander Graf <agraf at suse.de>

diff --git a/target-ppc/translate_init.c b/target-ppc/translate_init.c
index d07e186..4d94015 100644
--- a/target-ppc/translate_init.c
+++ b/target-ppc/translate_init.c
@@ -6699,6 +6699,8 @@ POWERPC_FAMILY(970)(ObjectClass *oc, void *data)
     pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
                  POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
                  POWERPC_FLAG_BUS_CLK;
+    pcc->l1_dcache_size = 0x8000;
+    pcc->l1_icache_size = 0x10000;
 }
 
 static int check_pow_970FX (CPUPPCState *env)
@@ -6791,6 +6793,8 @@ POWERPC_FAMILY(970FX)(ObjectClass *oc, void *data)
     pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
                  POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
                  POWERPC_FLAG_BUS_CLK;
+    pcc->l1_dcache_size = 0x8000;
+    pcc->l1_icache_size = 0x10000;
 }
 
 static int check_pow_970MP (CPUPPCState *env)
@@ -6877,6 +6881,8 @@ POWERPC_FAMILY(970MP)(ObjectClass *oc, void *data)
     pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
                  POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
                  POWERPC_FLAG_BUS_CLK;
+    pcc->l1_dcache_size = 0x8000;
+    pcc->l1_icache_size = 0x10000;
 }
 
 static void init_proc_power5plus(CPUPPCState *env)
@@ -6967,6 +6973,8 @@ POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data)
     pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
                  POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
                  POWERPC_FLAG_BUS_CLK;
+    pcc->l1_dcache_size = 0x8000;
+    pcc->l1_icache_size = 0x10000;
 }
 
 static void init_proc_POWER7 (CPUPPCState *env)
commit 6a2b3d89fa49ec060db646d196864a8fd15c10cf
Author: Alexander Graf <agraf at suse.de>
Date:   Mon Apr 7 16:48:42 2014 +0200

    ppce500_spin: Initialize struct properly
    
    The spinning struct is in guest endianness, so we need to initialize
    its variables in guest endianness too.
    
    This fixes booting e500 guests with SMP on x86 for me.
    
    Signed-off-by: Alexander Graf <agraf at suse.de>

diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c
index f9fdc8c..d49f2b8 100644
--- a/hw/ppc/ppce500_spin.c
+++ b/hw/ppc/ppce500_spin.c
@@ -65,9 +65,9 @@ static void spin_reset(void *opaque)
     for (i = 0; i < MAX_CPUS; i++) {
         SpinInfo *info = &s->spin[i];
 
-        info->pir = i;
-        info->r3 = i;
-        info->addr = 1;
+        stl_p(&info->pir, i);
+        stq_p(&info->r3, i);
+        stq_p(&info->addr, 1);
     }
 }
 
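Assuming stl_p()/stq_p() here resolve to the target-endian variants (the file
is compiled per target, and the PPC target is big-endian), the effect is that
the spin-table fields are laid out in guest byte order on any host. A minimal
sketch of such a guest-order store, with an invented name, for illustration:

    #include <stdint.h>

    /* Big-endian (guest-order for e500) 32-bit store: the byte layout is the
     * same whether the host is x86 (little-endian) or a big-endian machine. */
    static inline void store_guest_be32(void *p, uint32_t v)
    {
        uint8_t *b = p;
        b[0] = v >> 24;
        b[1] = v >> 16;
        b[2] = v >> 8;
        b[3] = v;
    }
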
commit 05edc26c61d416831822b3186df099e8e21745b9
Author: Alexander Graf <agraf at suse.de>
Date:   Sun Apr 6 22:40:47 2014 +0200

    PPC: Only enter MSR_POW when no interrupts pending
    
    We were entering the power saving state even when interrupts (like an
    external interrupt or a decrementer interrupt) were still in flight.
    
    If we find a pending interrupt, don't enter the power saving state.
    
    Signed-off-by: Alexander Graf <agraf at suse.de>
    Reviewed-by: Tom Musta <tmusta at gmail.com>

diff --git a/target-ppc/helper_regs.h b/target-ppc/helper_regs.h
index f7ec9c2..271fddf 100644
--- a/target-ppc/helper_regs.h
+++ b/target-ppc/helper_regs.h
@@ -101,7 +101,7 @@ static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
     hreg_compute_hflags(env);
 #if !defined(CONFIG_USER_ONLY)
     if (unlikely(msr_pow == 1)) {
-        if ((*env->check_pow)(env)) {
+        if (!env->pending_interrupts && (*env->check_pow)(env)) {
             cs->halted = 1;
             excp = EXCP_HALTED;
         }
commit e81a982aa5398269a2cc344091ffa4930bdd242f
Author: Alexander Graf <agraf at suse.de>
Date:   Sun Apr 6 01:32:06 2014 +0200

    PPC: Clean up DECR implementation
    
    There are 3 different variants of the decrementer for BookE and BookS.
    
    The BookE variant sets TSR[DIS] to 1 when the DEC value becomes 1 or 0. TSR[DIS]
    is then the indicator of whether the decrementer interrupt line is asserted or not.
    
    The old BookS variant treats DEC as an edge interrupt that gets triggered when
    the DEC value's top bit flips from 0 to 1.
    
    The new BookS variant maintains the assertion bit inside DEC itself. Whenever
    the DEC value becomes negative (top bit set) the DEC interrupt line is asserted.
    
    So far we implemented mostly the old BookS variant. Let's do them all properly.
    
    This fixes booting pseries ppc64 guest images in TCG mode for me.
    
    Signed-off-by: Alexander Graf <agraf at suse.de>

diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c
index 9c2a132..71df471 100644
--- a/hw/ppc/ppc.c
+++ b/hw/ppc/ppc.c
@@ -620,6 +620,13 @@ static void cpu_ppc_tb_start (CPUPPCState *env)
     }
 }
 
+bool ppc_decr_clear_on_delivery(CPUPPCState *env)
+{
+    ppc_tb_t *tb_env = env->tb_env;
+    int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
+    return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
+}
+
 static inline uint32_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
 {
     ppc_tb_t *tb_env = env->tb_env;
@@ -677,6 +684,11 @@ static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
     ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
 }
 
+static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
+{
+    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
+}
+
 static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
 {
     /* Raise it */
@@ -684,11 +696,16 @@ static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
     ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
 }
 
+static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
+{
+    ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
+}
+
 static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
                                  QEMUTimer *timer,
-                                 void (*raise_excp)(PowerPCCPU *),
-                                 uint32_t decr, uint32_t value,
-                                 int is_excp)
+                                 void (*raise_excp)(void *),
+                                 void (*lower_excp)(PowerPCCPU *),
+                                 uint32_t decr, uint32_t value)
 {
     CPUPPCState *env = &cpu->env;
     ppc_tb_t *tb_env = env->tb_env;
@@ -702,59 +719,74 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
         return;
     }
 
-    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
-    next = now + muldiv64(value, get_ticks_per_sec(), tb_env->decr_freq);
-    if (is_excp) {
-        next += *nextp - now;
+    /*
+     * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
+     * interrupt.
+     *
+     * If we get a really small DEC value, we can assume that by the time we
+     * handled it we should inject an interrupt already.
+     *
+     * On MSB level based DEC implementations the MSB always means the interrupt
+     * is pending, so raise it on those.
+     *
+     * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
+     * an edge interrupt, so raise it here too.
+     */
+    if ((value < 3) ||
+        ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && (value & 0x80000000)) ||
+        ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && (value & 0x80000000)
+          && !(decr & 0x80000000))) {
+        (*raise_excp)(cpu);
+        return;
     }
-    if (next == now) {
-        next++;
+
+    /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
+    if (!(value & 0x80000000) && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
+        (*lower_excp)(cpu);
     }
+
+    /* Calculate the next timer event */
+    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+    next = now + muldiv64(value, get_ticks_per_sec(), tb_env->decr_freq);
     *nextp = next;
+
     /* Adjust timer */
     timer_mod(timer, next);
-
-    /* If we set a negative value and the decrementer was positive, raise an
-     * exception.
-     */
-    if ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED)
-        && (value & 0x80000000)
-        && !(decr & 0x80000000)) {
-        (*raise_excp)(cpu);
-    }
 }
 
 static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, uint32_t decr,
-                                       uint32_t value, int is_excp)
+                                       uint32_t value)
 {
     ppc_tb_t *tb_env = cpu->env.tb_env;
 
     __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
-                         &cpu_ppc_decr_excp, decr, value, is_excp);
+                         tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
+                         value);
 }
 
 void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value)
 {
     PowerPCCPU *cpu = ppc_env_get_cpu(env);
 
-    _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, 0);
+    _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value);
 }
 
 static void cpu_ppc_decr_cb(void *opaque)
 {
     PowerPCCPU *cpu = opaque;
 
-    _cpu_ppc_store_decr(cpu, 0x00000000, 0xFFFFFFFF, 1);
+    cpu_ppc_decr_excp(cpu);
 }
 
 static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, uint32_t hdecr,
-                                        uint32_t value, int is_excp)
+                                        uint32_t value)
 {
     ppc_tb_t *tb_env = cpu->env.tb_env;
 
     if (tb_env->hdecr_timer != NULL) {
         __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
-                             &cpu_ppc_hdecr_excp, hdecr, value, is_excp);
+                             tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
+                             hdecr, value);
     }
 }
 
@@ -762,14 +794,14 @@ void cpu_ppc_store_hdecr (CPUPPCState *env, uint32_t value)
 {
     PowerPCCPU *cpu = ppc_env_get_cpu(env);
 
-    _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value, 0);
+    _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value);
 }
 
 static void cpu_ppc_hdecr_cb(void *opaque)
 {
     PowerPCCPU *cpu = opaque;
 
-    _cpu_ppc_store_hdecr(cpu, 0x00000000, 0xFFFFFFFF, 1);
+    cpu_ppc_hdecr_excp(cpu);
 }
 
 static void cpu_ppc_store_purr(PowerPCCPU *cpu, uint64_t value)
@@ -792,8 +824,8 @@ static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
      * if a decrementer exception is pending when it enables msr_ee at startup,
      * it's not ready to handle it...
      */
-    _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 0);
-    _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 0);
+    _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF);
+    _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF);
     cpu_ppc_store_purr(cpu, 0x0000000000000000ULL);
 }
 
@@ -806,6 +838,10 @@ clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
     tb_env = g_malloc0(sizeof(ppc_tb_t));
     env->tb_env = tb_env;
     tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
+    if (env->insns_flags & PPC_SEGMENT_64B) {
+        /* All Book3S 64bit CPUs implement level based DEC logic */
+        tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
+    }
     /* Create new timer */
     tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
     if (0) {
diff --git a/include/hw/ppc/ppc.h b/include/hw/ppc/ppc.h
index 835418a..d71bd07 100644
--- a/include/hw/ppc/ppc.h
+++ b/include/hw/ppc/ppc.h
@@ -44,6 +44,9 @@ struct ppc_tb_t {
 #define PPC_DECR_ZERO_TRIGGERED      (1 << 3) /* Decr interrupt triggered when
                                                * the decrementer reaches zero.
                                                */
+#define PPC_DECR_UNDERFLOW_LEVEL     (1 << 4) /* Decr interrupt active when
+                                               * the most significant bit is 1.
+                                               */
 
 uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset);
 clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq);
diff --git a/target-ppc/cpu.h b/target-ppc/cpu.h
index 2719c08..d498340 100644
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -1133,6 +1133,7 @@ uint64_t cpu_ppc_load_atbl (CPUPPCState *env);
 uint32_t cpu_ppc_load_atbu (CPUPPCState *env);
 void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value);
 void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value);
+bool ppc_decr_clear_on_delivery(CPUPPCState *env);
 uint32_t cpu_ppc_load_decr (CPUPPCState *env);
 void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value);
 uint32_t cpu_ppc_load_hdecr (CPUPPCState *env);
diff --git a/target-ppc/excp_helper.c b/target-ppc/excp_helper.c
index 19bc6b6..4fa297d 100644
--- a/target-ppc/excp_helper.c
+++ b/target-ppc/excp_helper.c
@@ -723,7 +723,6 @@ void ppc_hw_interrupt(CPUPPCState *env)
     if ((msr_ee != 0 || msr_hv == 0 || msr_pr != 0) && hdice != 0) {
         /* Hypervisor decrementer exception */
         if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
-            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
             return;
         }
@@ -767,7 +766,9 @@ void ppc_hw_interrupt(CPUPPCState *env)
         }
         /* Decrementer exception */
         if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
-            env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
+            if (ppc_decr_clear_on_delivery(env)) {
+                env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
+            }
             powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
             return;
         }
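
The decision logic spelled out in the commit message can be condensed into an
illustrative helper (a sketch only, with invented names -- the real code is in
the __cpu_ppc_store_decr() hunk above):

    #include <stdbool.h>
    #include <stdint.h>

    typedef enum {
        DEC_BOOKE,        /* DEC reaching 1 or 0 sets TSR[DIS], which drives
                           * the interrupt line                               */
        DEC_BOOKS_EDGE,   /* old BookS: edge interrupt when the MSB goes 0->1 */
        DEC_BOOKS_LEVEL,  /* new BookS: line asserted while the MSB is 1      */
    } DecVariant;

    static bool dec_raises_irq(DecVariant v, uint32_t old_decr, uint32_t new_decr)
    {
        switch (v) {
        case DEC_BOOKS_EDGE:
            return (new_decr & 0x80000000) && !(old_decr & 0x80000000);
        case DEC_BOOKS_LEVEL:
            return (new_decr & 0x80000000) != 0;
        case DEC_BOOKE:
            return new_decr <= 1;
        }
        return false;
    }
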
commit 6cd7db3d92d44344d75feb432e3ece8587e1afd4
Author: Tom Musta <tommusta at gmail.com>
Date:   Mon Mar 31 16:04:03 2014 -0500

    target-ppc: Correct VSX Integer to FP Conversion
    
    This patch corrects the VSX integer to floating point conversion instructions
    by using the endian correct accessors.  The auxiliary "j" index used by the
    existing macros is now obsolete and is removed.  The JOFFSET preprocessor
    macro is also obsolete and removed.
    
    Signed-off-by: Tom Musta <tommusta at gmail.com>
    Tested-by: Tom Musta <tommusta at gmail.com>
    Signed-off-by: Alexander Graf <agraf at suse.de>

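The endian-correct accessors used throughout the VSX fixes below come from the
"target-ppc: Define Endian-Correct Accessors for VSR Field Access" commit in
this same pull; its diff is not reproduced in this mail, but the macros
plausibly take roughly this shape (a sketch gated on HOST_WORDS_BIGENDIAN, not
copied from the actual commit):

    /* Element 0 always names the architecturally highest doubleword/word of
     * the VSR, regardless of host byte order. */
    #if defined(HOST_WORDS_BIGENDIAN)
    #define VsrW(i) u32[i]
    #define VsrD(i) u64[i]
    #else
    #define VsrW(i) u32[3 - (i)]
    #define VsrD(i) u64[1 - (i)]
    #endif

With accessors of this kind, a scalar helper can use VsrD(0) for the most
significant doubleword on either host, which is exactly the substitution made
in the diffs that follow.
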
diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index abba703..c6f484f 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -2487,12 +2487,6 @@ VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0)
 VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1)
 VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1)
 
-#if defined(HOST_WORDS_BIGENDIAN)
-#define JOFFSET 0
-#else
-#define JOFFSET 1
-#endif
-
 /* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
  *   op    - instruction mnemonic
  *   nels  - number of elements (1, 2 or 4)
@@ -2614,7 +2608,7 @@ VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
  *   jdef  - definition of the j index (i or 2*i)
  *   sfprf - set FPRF
  */
-#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, jdef, sfprf, r2sp) \
+#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp)  \
 void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
 {                                                                       \
     ppc_vsr_t xt, xb;                                                   \
@@ -2624,7 +2618,6 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
     getVSR(xT(opcode), &xt, env);                                       \
                                                                         \
     for (i = 0; i < nels; i++) {                                        \
-        int j = jdef;                                                   \
         xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);             \
         if (r2sp) {                                                     \
             xt.tfld = helper_frsp(env, xt.tfld);                        \
@@ -2638,22 +2631,18 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
     helper_float_check_status(env);                                     \
 }
 
-VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, u64[j], f64[i], i, 1, 0)
-VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, u64[j], f64[i], i, 1, 0)
-VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, u64[j], f64[i], i, 1, 1)
-VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, u64[j], f64[i], i, 1, 1)
-VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, u64[j], f64[i], i, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, u64[j], f64[i], i, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, u32[j], f64[i], \
-                  2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, u32[j], f64[i], \
-                  2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, u64[i], f32[j], \
-                  2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, u64[i], f32[j], \
-                  2*i + JOFFSET, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, u32[j], f32[i], i, 0, 0)
-VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, u32[j], f32[i], i, 0, 0)
+VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
 
 /* For "use current rounding mode", define a value that will not be one of
  * the existing rounding model enums.
commit d1dec5ef550802f76ffb8cdec50e6d50467e877e
Author: Tom Musta <tommusta at gmail.com>
Date:   Mon Mar 31 16:04:02 2014 -0500

    target-ppc: Correct VSX FP to Integer Conversion
    
    This patch corrects the VSX floating point to integer conversion
    instructions by using the endian correct accessors.  The auxiliary
    "j" index used by the existing macros is now obsolete and is removed.
    
    Signed-off-by: Tom Musta <tommusta at gmail.com>
    Tested-by: Tom Musta <tommusta at gmail.com>
    Signed-off-by: Alexander Graf <agraf at suse.de>

diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index 12bec90..abba703 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -2555,10 +2555,9 @@ uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
  *   ttp   - target type (int32, uint32, int64 or uint64)
  *   sfld  - source vsr_t field
  *   tfld  - target vsr_t field
- *   jdef  - definition of the j index (i or 2*i)
  *   rnan  - resulting NaN
  */
-#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, jdef, rnan)        \
+#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan)              \
 void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
 {                                                                            \
     ppc_vsr_t xt, xb;                                                        \
@@ -2568,7 +2567,6 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
     getVSR(xT(opcode), &xt, env);                                            \
                                                                              \
     for (i = 0; i < nels; i++) {                                             \
-        int j = jdef;                                                        \
         if (unlikely(stp##_is_any_nan(xb.sfld))) {                           \
             if (stp##_is_signaling_nan(xb.sfld)) {                           \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
@@ -2588,27 +2586,23 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
     helper_float_check_status(env);                                          \
 }
 
-VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, f64[j], u64[i], i, \
+VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
                   0x8000000000000000ULL)
-VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, f64[i], u32[j], \
-                  2*i + JOFFSET, 0x80000000U)
-VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, f64[j], u64[i], i, 0ULL)
-VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, f64[i], u32[j], \
-                  2*i + JOFFSET, 0U)
-VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, f64[j], u64[i], i, \
+VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
+                  0x80000000U)
+VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
+VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
+VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
                   0x8000000000000000ULL)
-VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, f64[i], u32[j], \
-                  2*i + JOFFSET, 0x80000000U)
-VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, f64[j], u64[i], i, 0ULL)
-VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, f64[i], u32[j], \
-                  2*i + JOFFSET, 0U)
-VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, f32[j], u64[i], \
-                  2*i + JOFFSET, 0x8000000000000000ULL)
-VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, f32[j], u32[j], i, \
+VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
                   0x80000000U)
-VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, f32[j], u64[i], \
-                  2*i + JOFFSET, 0ULL)
-VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, f32[j], u32[i], i, 0U)
+VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
+VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
+VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
+                  0x8000000000000000ULL)
+VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
+VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
+VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
 
 /* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
  *   op    - instruction mnemonic
commit 6bbad7a91efe49b080391a45bc6305449050465a
Author: Tom Musta <tommusta at gmail.com>
Date:   Mon Mar 31 16:04:01 2014 -0500

    target-ppc: Correct VSX FP to FP Conversions
    
    This change corrects the VSX double precision to single precision and
    single precision to double precision conversion routines.  The endian
    correct accessors are now used.  The auxiliary "j" index is no longer
    necessary and is eliminated.
    
    Signed-off-by: Tom Musta <tommusta at gmail.com>
    Tested-by: Tom Musta <tommusta at gmail.com>
    Signed-off-by: Alexander Graf <agraf at suse.de>

diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index 6233d5e..12bec90 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -2512,7 +2512,6 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                \
     getVSR(xT(opcode), &xt, env);                                  \
                                                                    \
     for (i = 0; i < nels; i++) {                                   \
-        int j = 2*i + JOFFSET;                                     \
         xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);        \
         if (unlikely(stp##_is_signaling_nan(xb.sfld))) {           \
             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
@@ -2528,10 +2527,10 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                \
     helper_float_check_status(env);                                \
 }
 
-VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, f64[i], f32[j], 1)
-VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, f32[j], f64[i], 1)
-VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, f64[i], f32[j], 0)
-VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, f32[j], f64[i], 0)
+VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
+VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
+VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
+VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
 
 uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
 {
commit 50fc89e7b1a2837a2d92025aa2ed161d8439743b
Author: Tom Musta <tommusta at gmail.com>
Date:   Mon Mar 31 16:04:00 2014 -0500

    target-ppc: Correct VSX Scalar Compares
    
    This change fixes the VSX scalar compare instructions.  The existing usage of "x.f64[0]"
    is changed to "x.VsrD(0)".
    
    Signed-off-by: Tom Musta <tommusta at gmail.com>
    Tested-by: Tom Musta <tommusta at gmail.com>
    Signed-off-by: Alexander Graf <agraf at suse.de>

diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index 1c37b30..6233d5e 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -2360,10 +2360,10 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
     getVSR(xA(opcode), &xa, env);                                        \
     getVSR(xB(opcode), &xb, env);                                        \
                                                                          \
-    if (unlikely(float64_is_any_nan(xa.f64[0]) ||                        \
-                 float64_is_any_nan(xb.f64[0]))) {                       \
-        if (float64_is_signaling_nan(xa.f64[0]) ||                       \
-            float64_is_signaling_nan(xb.f64[0])) {                       \
+    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                       \
+                 float64_is_any_nan(xb.VsrD(0)))) {                      \
+        if (float64_is_signaling_nan(xa.VsrD(0)) ||                      \
+            float64_is_signaling_nan(xb.VsrD(0))) {                      \
             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);       \
         }                                                                \
         if (ordered) {                                                   \
@@ -2371,9 +2371,10 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
         }                                                                \
         cc = 1;                                                          \
     } else {                                                             \
-        if (float64_lt(xa.f64[0], xb.f64[0], &env->fp_status)) {         \
+        if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) {       \
             cc = 8;                                                      \
-        } else if (!float64_le(xa.f64[0], xb.f64[0], &env->fp_status)) { \
+        } else if (!float64_le(xa.VsrD(0), xb.VsrD(0),                   \
+                               &env->fp_status)) { \
             cc = 4;                                                      \
         } else {                                                         \
             cc = 2;                                                      \
commit bcb7652e8dd185f2acc36fd2b9eb7b6dffcc8c47
Author: Tom Musta <tommusta at gmail.com>
Date:   Mon Mar 31 16:03:59 2014 -0500

    target-ppc: Correct Simple VSR LE Host Inversions
    
    A common pattern in the VSX helper code macros is the use of "x.fld[i]" where
    "x" is a VSR and "fld" is an argument to a macro ("f64" or "f32" is passed).
    This is not always correct on LE hosts.
    
    This change rewrites all instances of this pattern as "x.fld", where "fld" is:
    
      - "VsrD(0)" for scalar instructions accessing 64-bit numbers
      - "VsrD(i)" for vector instructions accessing 64-bit numbers
      - "VsrW(i)" for vector instructions accessing 32-bit numbers
    
    Note that there are no instances of this pattern where a scalar instruction
    accesses a 32-bit number.
    
    Note also that it would be correct to use "VsrD(i)" for scalar instructions since
    the loop index is only ever "0".  I have chosen to use "VsrD(0)" instead ... it
    seems a little clearer.
    
    Signed-off-by: Tom Musta <tommusta at gmail.com>
    Tested-by: Tom Musta <tommusta at gmail.com>
    Signed-off-by: Alexander Graf <agraf at suse.de>

diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index 9fc7dd8..1c37b30 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -1820,7 +1820,7 @@ static void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
  *   op    - operation (add or sub)
  *   nels  - number of elements (1, 2 or 4)
  *   tp    - type (float32 or float64)
- *   fld   - vsr_t field (f32 or f64)
+ *   fld   - vsr_t field (VsrD(*) or VsrW(*))
  *   sfprf - set FPRF
  */
 #define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp)                    \
@@ -1837,44 +1837,44 @@ void helper_##name(CPUPPCState *env, uint32_t opcode)                        \
     for (i = 0; i < nels; i++) {                                             \
         float_status tstat = env->fp_status;                                 \
         set_float_exception_flags(0, &tstat);                                \
-        xt.fld[i] = tp##_##op(xa.fld[i], xb.fld[i], &tstat);                 \
+        xt.fld = tp##_##op(xa.fld, xb.fld, &tstat);                          \
         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                              \
         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
-            if (tp##_is_infinity(xa.fld[i]) && tp##_is_infinity(xb.fld[i])) {\
+            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {      \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);    \
-            } else if (tp##_is_signaling_nan(xa.fld[i]) ||                   \
-                       tp##_is_signaling_nan(xb.fld[i])) {                   \
+            } else if (tp##_is_signaling_nan(xa.fld) ||                      \
+                       tp##_is_signaling_nan(xb.fld)) {                      \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
             }                                                                \
         }                                                                    \
                                                                              \
         if (r2sp) {                                                          \
-            xt.fld[i] = helper_frsp(env, xt.fld[i]);                         \
+            xt.fld = helper_frsp(env, xt.fld);                               \
         }                                                                    \
                                                                              \
         if (sfprf) {                                                         \
-            helper_compute_fprf(env, xt.fld[i], sfprf);                      \
+            helper_compute_fprf(env, xt.fld, sfprf);                         \
         }                                                                    \
     }                                                                        \
     putVSR(xT(opcode), &xt, env);                                            \
     helper_float_check_status(env);                                          \
 }
 
-VSX_ADD_SUB(xsadddp, add, 1, float64, f64, 1, 0)
-VSX_ADD_SUB(xsaddsp, add, 1, float64, f64, 1, 1)
-VSX_ADD_SUB(xvadddp, add, 2, float64, f64, 0, 0)
-VSX_ADD_SUB(xvaddsp, add, 4, float32, f32, 0, 0)
-VSX_ADD_SUB(xssubdp, sub, 1, float64, f64, 1, 0)
-VSX_ADD_SUB(xssubsp, sub, 1, float64, f64, 1, 1)
-VSX_ADD_SUB(xvsubdp, sub, 2, float64, f64, 0, 0)
-VSX_ADD_SUB(xvsubsp, sub, 4, float32, f32, 0, 0)
+VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
+VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
+VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
+VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
+VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
+VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
+VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
+VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
 
 /* VSX_MUL - VSX floating point multiply
  *   op    - instruction mnemonic
  *   nels  - number of elements (1, 2 or 4)
  *   tp    - type (float32 or float64)
- *   fld   - vsr_t field (f32 or f64)
+ *   fld   - vsr_t field (VsrD(*) or VsrW(*))
  *   sfprf - set FPRF
  */
 #define VSX_MUL(op, nels, tp, fld, sfprf, r2sp)                              \
@@ -1891,25 +1891,25 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
     for (i = 0; i < nels; i++) {                                             \
         float_status tstat = env->fp_status;                                 \
         set_float_exception_flags(0, &tstat);                                \
-        xt.fld[i] = tp##_mul(xa.fld[i], xb.fld[i], &tstat);                  \
+        xt.fld = tp##_mul(xa.fld, xb.fld, &tstat);                           \
         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                              \
         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
-            if ((tp##_is_infinity(xa.fld[i]) && tp##_is_zero(xb.fld[i])) ||  \
-                (tp##_is_infinity(xb.fld[i]) && tp##_is_zero(xa.fld[i]))) {  \
+            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) ||        \
+                (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) {        \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf);    \
-            } else if (tp##_is_signaling_nan(xa.fld[i]) ||                   \
-                       tp##_is_signaling_nan(xb.fld[i])) {                   \
+            } else if (tp##_is_signaling_nan(xa.fld) ||                      \
+                       tp##_is_signaling_nan(xb.fld)) {                      \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
             }                                                                \
         }                                                                    \
                                                                              \
         if (r2sp) {                                                          \
-            xt.fld[i] = helper_frsp(env, xt.fld[i]);                         \
+            xt.fld = helper_frsp(env, xt.fld);                               \
         }                                                                    \
                                                                              \
         if (sfprf) {                                                         \
-            helper_compute_fprf(env, xt.fld[i], sfprf);                      \
+            helper_compute_fprf(env, xt.fld, sfprf);                         \
         }                                                                    \
     }                                                                        \
                                                                              \
@@ -1917,16 +1917,16 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
     helper_float_check_status(env);                                          \
 }
 
-VSX_MUL(xsmuldp, 1, float64, f64, 1, 0)
-VSX_MUL(xsmulsp, 1, float64, f64, 1, 1)
-VSX_MUL(xvmuldp, 2, float64, f64, 0, 0)
-VSX_MUL(xvmulsp, 4, float32, f32, 0, 0)
+VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
+VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
+VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
+VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
 
 /* VSX_DIV - VSX floating point divide
  *   op    - instruction mnemonic
  *   nels  - number of elements (1, 2 or 4)
  *   tp    - type (float32 or float64)
- *   fld   - vsr_t field (f32 or f64)
+ *   fld   - vsr_t field (VsrD(*) or VsrW(*))
  *   sfprf - set FPRF
  */
 #define VSX_DIV(op, nels, tp, fld, sfprf, r2sp)                               \
@@ -1943,27 +1943,27 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
     for (i = 0; i < nels; i++) {                                              \
         float_status tstat = env->fp_status;                                  \
         set_float_exception_flags(0, &tstat);                                 \
-        xt.fld[i] = tp##_div(xa.fld[i], xb.fld[i], &tstat);                   \
+        xt.fld = tp##_div(xa.fld, xb.fld, &tstat);                            \
         env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                               \
         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
-            if (tp##_is_infinity(xa.fld[i]) && tp##_is_infinity(xb.fld[i])) { \
+            if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) {       \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf);     \
-            } else if (tp##_is_zero(xa.fld[i]) &&                             \
-                tp##_is_zero(xb.fld[i])) {                                    \
+            } else if (tp##_is_zero(xa.fld) &&                                \
+                tp##_is_zero(xb.fld)) {                                       \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf);     \
-            } else if (tp##_is_signaling_nan(xa.fld[i]) ||                    \
-                tp##_is_signaling_nan(xb.fld[i])) {                           \
+            } else if (tp##_is_signaling_nan(xa.fld) ||                       \
+                tp##_is_signaling_nan(xb.fld)) {                              \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
             }                                                                 \
         }                                                                     \
                                                                               \
         if (r2sp) {                                                           \
-            xt.fld[i] = helper_frsp(env, xt.fld[i]);                          \
+            xt.fld = helper_frsp(env, xt.fld);                                \
         }                                                                     \
                                                                               \
         if (sfprf) {                                                          \
-            helper_compute_fprf(env, xt.fld[i], sfprf);                       \
+            helper_compute_fprf(env, xt.fld, sfprf);                          \
         }                                                                     \
     }                                                                         \
                                                                               \
@@ -1971,16 +1971,16 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
     helper_float_check_status(env);                                           \
 }
 
-VSX_DIV(xsdivdp, 1, float64, f64, 1, 0)
-VSX_DIV(xsdivsp, 1, float64, f64, 1, 1)
-VSX_DIV(xvdivdp, 2, float64, f64, 0, 0)
-VSX_DIV(xvdivsp, 4, float32, f32, 0, 0)
+VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
+VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
+VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
+VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
 
 /* VSX_RE  - VSX floating point reciprocal estimate
  *   op    - instruction mnemonic
  *   nels  - number of elements (1, 2 or 4)
  *   tp    - type (float32 or float64)
- *   fld   - vsr_t field (f32 or f64)
+ *   fld   - vsr_t field (VsrD(*) or VsrW(*))
  *   sfprf - set FPRF
  */
 #define VSX_RE(op, nels, tp, fld, sfprf, r2sp)                                \
@@ -1994,17 +1994,17 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
     helper_reset_fpstatus(env);                                               \
                                                                               \
     for (i = 0; i < nels; i++) {                                              \
-        if (unlikely(tp##_is_signaling_nan(xb.fld[i]))) {                     \
+        if (unlikely(tp##_is_signaling_nan(xb.fld))) {                        \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
         }                                                                     \
-        xt.fld[i] = tp##_div(tp##_one, xb.fld[i], &env->fp_status);           \
+        xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status);                 \
                                                                               \
         if (r2sp) {                                                           \
-            xt.fld[i] = helper_frsp(env, xt.fld[i]);                          \
+            xt.fld = helper_frsp(env, xt.fld);                                \
         }                                                                     \
                                                                               \
         if (sfprf) {                                                          \
-            helper_compute_fprf(env, xt.fld[0], sfprf);                       \
+            helper_compute_fprf(env, xt.fld, sfprf);                          \
         }                                                                     \
     }                                                                         \
                                                                               \
@@ -2012,16 +2012,16 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
     helper_float_check_status(env);                                           \
 }
 
-VSX_RE(xsredp, 1, float64, f64, 1, 0)
-VSX_RE(xsresp, 1, float64, f64, 1, 1)
-VSX_RE(xvredp, 2, float64, f64, 0, 0)
-VSX_RE(xvresp, 4, float32, f32, 0, 0)
+VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
+VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
+VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
+VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
 
 /* VSX_SQRT - VSX floating point square root
  *   op    - instruction mnemonic
  *   nels  - number of elements (1, 2 or 4)
  *   tp    - type (float32 or float64)
- *   fld   - vsr_t field (f32 or f64)
+ *   fld   - vsr_t field (VsrD(*) or VsrW(*))
  *   sfprf - set FPRF
  */
 #define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp)                             \
@@ -2037,23 +2037,23 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
     for (i = 0; i < nels; i++) {                                             \
         float_status tstat = env->fp_status;                                 \
         set_float_exception_flags(0, &tstat);                                \
-        xt.fld[i] = tp##_sqrt(xb.fld[i], &tstat);                            \
+        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                              \
         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
-            if (tp##_is_neg(xb.fld[i]) && !tp##_is_zero(xb.fld[i])) {        \
+            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
-            } else if (tp##_is_signaling_nan(xb.fld[i])) {                   \
+            } else if (tp##_is_signaling_nan(xb.fld)) {                      \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
             }                                                                \
         }                                                                    \
                                                                              \
         if (r2sp) {                                                          \
-            xt.fld[i] = helper_frsp(env, xt.fld[i]);                         \
+            xt.fld = helper_frsp(env, xt.fld);                               \
         }                                                                    \
                                                                              \
         if (sfprf) {                                                         \
-            helper_compute_fprf(env, xt.fld[i], sfprf);                      \
+            helper_compute_fprf(env, xt.fld, sfprf);                         \
         }                                                                    \
     }                                                                        \
                                                                              \
@@ -2061,16 +2061,16 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
     helper_float_check_status(env);                                          \
 }
 
-VSX_SQRT(xssqrtdp, 1, float64, f64, 1, 0)
-VSX_SQRT(xssqrtsp, 1, float64, f64, 1, 1)
-VSX_SQRT(xvsqrtdp, 2, float64, f64, 0, 0)
-VSX_SQRT(xvsqrtsp, 4, float32, f32, 0, 0)
+VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
+VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
+VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
+VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
 
 /* VSX_RSQRTE - VSX floating point reciprocal square root estimate
  *   op    - instruction mnemonic
  *   nels  - number of elements (1, 2 or 4)
  *   tp    - type (float32 or float64)
- *   fld   - vsr_t field (f32 or f64)
+ *   fld   - vsr_t field (VsrD(*) or VsrW(*))
  *   sfprf - set FPRF
  */
 #define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp)                           \
@@ -2086,24 +2086,24 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
     for (i = 0; i < nels; i++) {                                             \
         float_status tstat = env->fp_status;                                 \
         set_float_exception_flags(0, &tstat);                                \
-        xt.fld[i] = tp##_sqrt(xb.fld[i], &tstat);                            \
-        xt.fld[i] = tp##_div(tp##_one, xt.fld[i], &tstat);                   \
+        xt.fld = tp##_sqrt(xb.fld, &tstat);                                  \
+        xt.fld = tp##_div(tp##_one, xt.fld, &tstat);                         \
         env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
                                                                              \
         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {    \
-            if (tp##_is_neg(xb.fld[i]) && !tp##_is_zero(xb.fld[i])) {        \
+            if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) {              \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf);   \
-            } else if (tp##_is_signaling_nan(xb.fld[i])) {                   \
+            } else if (tp##_is_signaling_nan(xb.fld)) {                      \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);   \
             }                                                                \
         }                                                                    \
                                                                              \
         if (r2sp) {                                                          \
-            xt.fld[i] = helper_frsp(env, xt.fld[i]);                         \
+            xt.fld = helper_frsp(env, xt.fld);                               \
         }                                                                    \
                                                                              \
         if (sfprf) {                                                         \
-            helper_compute_fprf(env, xt.fld[i], sfprf);                      \
+            helper_compute_fprf(env, xt.fld, sfprf);                         \
         }                                                                    \
     }                                                                        \
                                                                              \
@@ -2111,16 +2111,16 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
     helper_float_check_status(env);                                          \
 }
 
-VSX_RSQRTE(xsrsqrtedp, 1, float64, f64, 1, 0)
-VSX_RSQRTE(xsrsqrtesp, 1, float64, f64, 1, 1)
-VSX_RSQRTE(xvrsqrtedp, 2, float64, f64, 0, 0)
-VSX_RSQRTE(xvrsqrtesp, 4, float32, f32, 0, 0)
+VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
+VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
+VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
+VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
 
 /* VSX_TDIV - VSX floating point test for divide
  *   op    - instruction mnemonic
  *   nels  - number of elements (1, 2 or 4)
  *   tp    - type (float32 or float64)
- *   fld   - vsr_t field (f32 or f64)
+ *   fld   - vsr_t field (VsrD(*) or VsrW(*))
  *   emin  - minimum unbiased exponent
  *   emax  - maximum unbiased exponent
  *   nbits - number of fraction bits
@@ -2137,28 +2137,28 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
     getVSR(xB(opcode), &xb, env);                                       \
                                                                         \
     for (i = 0; i < nels; i++) {                                        \
-        if (unlikely(tp##_is_infinity(xa.fld[i]) ||                     \
-                     tp##_is_infinity(xb.fld[i]) ||                     \
-                     tp##_is_zero(xb.fld[i]))) {                        \
+        if (unlikely(tp##_is_infinity(xa.fld) ||                        \
+                     tp##_is_infinity(xb.fld) ||                        \
+                     tp##_is_zero(xb.fld))) {                           \
             fe_flag = 1;                                                \
             fg_flag = 1;                                                \
         } else {                                                        \
-            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld[i]);           \
-            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld[i]);           \
+            int e_a = ppc_##tp##_get_unbiased_exp(xa.fld);              \
+            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
                                                                         \
-            if (unlikely(tp##_is_any_nan(xa.fld[i]) ||                  \
-                         tp##_is_any_nan(xb.fld[i]))) {                 \
+            if (unlikely(tp##_is_any_nan(xa.fld) ||                     \
+                         tp##_is_any_nan(xb.fld))) {                    \
                 fe_flag = 1;                                            \
             } else if ((e_b <= emin) || (e_b >= (emax-2))) {            \
                 fe_flag = 1;                                            \
-            } else if (!tp##_is_zero(xa.fld[i]) &&                      \
+            } else if (!tp##_is_zero(xa.fld) &&                         \
                        (((e_a - e_b) >= emax) ||                        \
                         ((e_a - e_b) <= (emin+1)) ||                    \
                          (e_a <= (emin+nbits)))) {                      \
                 fe_flag = 1;                                            \
             }                                                           \
                                                                         \
-            if (unlikely(tp##_is_zero_or_denormal(xb.fld[i]))) {        \
+            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
                 /* XB is not zero because of the above check and */     \
                 /* so must be denormalized.                      */     \
                 fg_flag = 1;                                            \
@@ -2169,15 +2169,15 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
     env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
 }
 
-VSX_TDIV(xstdivdp, 1, float64, f64, -1022, 1023, 52)
-VSX_TDIV(xvtdivdp, 2, float64, f64, -1022, 1023, 52)
-VSX_TDIV(xvtdivsp, 4, float32, f32, -126, 127, 23)
+VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
+VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
+VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
 
 /* VSX_TSQRT - VSX floating point test for square root
  *   op    - instruction mnemonic
  *   nels  - number of elements (1, 2 or 4)
  *   tp    - type (float32 or float64)
- *   fld   - vsr_t field (f32 or f64)
+ *   fld   - vsr_t field (VsrD(*) or VsrW(*))
  *   emin  - minimum unbiased exponent
  *   emax  - maximum unbiased exponent
  *   nbits - number of fraction bits
@@ -2194,25 +2194,25 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
     getVSR(xB(opcode), &xb, env);                                       \
                                                                         \
     for (i = 0; i < nels; i++) {                                        \
-        if (unlikely(tp##_is_infinity(xb.fld[i]) ||                     \
-                     tp##_is_zero(xb.fld[i]))) {                        \
+        if (unlikely(tp##_is_infinity(xb.fld) ||                        \
+                     tp##_is_zero(xb.fld))) {                           \
             fe_flag = 1;                                                \
             fg_flag = 1;                                                \
         } else {                                                        \
-            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld[i]);           \
+            int e_b = ppc_##tp##_get_unbiased_exp(xb.fld);              \
                                                                         \
-            if (unlikely(tp##_is_any_nan(xb.fld[i]))) {                 \
+            if (unlikely(tp##_is_any_nan(xb.fld))) {                    \
                 fe_flag = 1;                                            \
-            } else if (unlikely(tp##_is_zero(xb.fld[i]))) {             \
+            } else if (unlikely(tp##_is_zero(xb.fld))) {                \
                 fe_flag = 1;                                            \
-            } else if (unlikely(tp##_is_neg(xb.fld[i]))) {              \
+            } else if (unlikely(tp##_is_neg(xb.fld))) {                 \
                 fe_flag = 1;                                            \
-            } else if (!tp##_is_zero(xb.fld[i]) &&                      \
+            } else if (!tp##_is_zero(xb.fld) &&                         \
                       (e_b <= (emin+nbits))) {                          \
                 fe_flag = 1;                                            \
             }                                                           \
                                                                         \
-            if (unlikely(tp##_is_zero_or_denormal(xb.fld[i]))) {        \
+            if (unlikely(tp##_is_zero_or_denormal(xb.fld))) {           \
                 /* XB is not zero because of the above check and */     \
                 /* therefore must be denormalized.               */     \
                 fg_flag = 1;                                            \
@@ -2223,15 +2223,15 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                     \
     env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
 }
 
-VSX_TSQRT(xstsqrtdp, 1, float64, f64, -1022, 52)
-VSX_TSQRT(xvtsqrtdp, 2, float64, f64, -1022, 52)
-VSX_TSQRT(xvtsqrtsp, 4, float32, f32, -126, 23)
+VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
+VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
+VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
 
 /* VSX_MADD - VSX floating point muliply/add variations
  *   op    - instruction mnemonic
  *   nels  - number of elements (1, 2 or 4)
  *   tp    - type (float32 or float64)
- *   fld   - vsr_t field (f32 or f64)
+ *   fld   - vsr_t field (VsrD(*) or VsrW(*))
  *   maddflgs - flags for the float*muladd routine that control the
  *           various forms (madd, msub, nmadd, nmsub)
  *   afrm  - A form (1=A, 0=M)
@@ -2267,43 +2267,43 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
             /* Avoid double rounding errors by rounding the intermediate */   \
             /* result to odd.                                            */   \
             set_float_rounding_mode(float_round_to_zero, &tstat);             \
-            xt_out.fld[i] = tp##_muladd(xa.fld[i], b->fld[i], c->fld[i],      \
+            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
                                        maddflgs, &tstat);                     \
-            xt_out.fld[i] |= (get_float_exception_flags(&tstat) &             \
+            xt_out.fld |= (get_float_exception_flags(&tstat) &                \
                               float_flag_inexact) != 0;                       \
         } else {                                                              \
-            xt_out.fld[i] = tp##_muladd(xa.fld[i], b->fld[i], c->fld[i],      \
+            xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld,                  \
                                         maddflgs, &tstat);                    \
         }                                                                     \
         env->fp_status.float_exception_flags |= tstat.float_exception_flags;  \
                                                                               \
         if (unlikely(tstat.float_exception_flags & float_flag_invalid)) {     \
-            if (tp##_is_signaling_nan(xa.fld[i]) ||                           \
-                tp##_is_signaling_nan(b->fld[i]) ||                           \
-                tp##_is_signaling_nan(c->fld[i])) {                           \
+            if (tp##_is_signaling_nan(xa.fld) ||                              \
+                tp##_is_signaling_nan(b->fld) ||                              \
+                tp##_is_signaling_nan(c->fld)) {                              \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf);    \
                 tstat.float_exception_flags &= ~float_flag_invalid;           \
             }                                                                 \
-            if ((tp##_is_infinity(xa.fld[i]) && tp##_is_zero(b->fld[i])) ||   \
-                (tp##_is_zero(xa.fld[i]) && tp##_is_infinity(b->fld[i]))) {   \
-                xt_out.fld[i] = float64_to_##tp(fload_invalid_op_excp(env,    \
+            if ((tp##_is_infinity(xa.fld) && tp##_is_zero(b->fld)) ||         \
+                (tp##_is_zero(xa.fld) && tp##_is_infinity(b->fld))) {         \
+                xt_out.fld = float64_to_##tp(fload_invalid_op_excp(env,       \
                     POWERPC_EXCP_FP_VXIMZ, sfprf), &env->fp_status);          \
                 tstat.float_exception_flags &= ~float_flag_invalid;           \
             }                                                                 \
             if ((tstat.float_exception_flags & float_flag_invalid) &&         \
-                ((tp##_is_infinity(xa.fld[i]) ||                              \
-                  tp##_is_infinity(b->fld[i])) &&                             \
-                  tp##_is_infinity(c->fld[i]))) {                             \
+                ((tp##_is_infinity(xa.fld) ||                                 \
+                  tp##_is_infinity(b->fld)) &&                                \
+                  tp##_is_infinity(c->fld))) {                                \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf);     \
             }                                                                 \
         }                                                                     \
                                                                               \
         if (r2sp) {                                                           \
-            xt_out.fld[i] = helper_frsp(env, xt_out.fld[i]);                  \
+            xt_out.fld = helper_frsp(env, xt_out.fld);                        \
         }                                                                     \
                                                                               \
         if (sfprf) {                                                          \
-            helper_compute_fprf(env, xt_out.fld[i], sfprf);                   \
+            helper_compute_fprf(env, xt_out.fld, sfprf);                      \
         }                                                                     \
     }                                                                         \
     putVSR(xT(opcode), &xt_out, env);                                         \
@@ -2315,41 +2315,41 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
 #define NMADD_FLGS float_muladd_negate_result
 #define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
 
-VSX_MADD(xsmaddadp, 1, float64, f64, MADD_FLGS, 1, 1, 0)
-VSX_MADD(xsmaddmdp, 1, float64, f64, MADD_FLGS, 0, 1, 0)
-VSX_MADD(xsmsubadp, 1, float64, f64, MSUB_FLGS, 1, 1, 0)
-VSX_MADD(xsmsubmdp, 1, float64, f64, MSUB_FLGS, 0, 1, 0)
-VSX_MADD(xsnmaddadp, 1, float64, f64, NMADD_FLGS, 1, 1, 0)
-VSX_MADD(xsnmaddmdp, 1, float64, f64, NMADD_FLGS, 0, 1, 0)
-VSX_MADD(xsnmsubadp, 1, float64, f64, NMSUB_FLGS, 1, 1, 0)
-VSX_MADD(xsnmsubmdp, 1, float64, f64, NMSUB_FLGS, 0, 1, 0)
-
-VSX_MADD(xsmaddasp, 1, float64, f64, MADD_FLGS, 1, 1, 1)
-VSX_MADD(xsmaddmsp, 1, float64, f64, MADD_FLGS, 0, 1, 1)
-VSX_MADD(xsmsubasp, 1, float64, f64, MSUB_FLGS, 1, 1, 1)
-VSX_MADD(xsmsubmsp, 1, float64, f64, MSUB_FLGS, 0, 1, 1)
-VSX_MADD(xsnmaddasp, 1, float64, f64, NMADD_FLGS, 1, 1, 1)
-VSX_MADD(xsnmaddmsp, 1, float64, f64, NMADD_FLGS, 0, 1, 1)
-VSX_MADD(xsnmsubasp, 1, float64, f64, NMSUB_FLGS, 1, 1, 1)
-VSX_MADD(xsnmsubmsp, 1, float64, f64, NMSUB_FLGS, 0, 1, 1)
-
-VSX_MADD(xvmaddadp, 2, float64, f64, MADD_FLGS, 1, 0, 0)
-VSX_MADD(xvmaddmdp, 2, float64, f64, MADD_FLGS, 0, 0, 0)
-VSX_MADD(xvmsubadp, 2, float64, f64, MSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvmsubmdp, 2, float64, f64, MSUB_FLGS, 0, 0, 0)
-VSX_MADD(xvnmaddadp, 2, float64, f64, NMADD_FLGS, 1, 0, 0)
-VSX_MADD(xvnmaddmdp, 2, float64, f64, NMADD_FLGS, 0, 0, 0)
-VSX_MADD(xvnmsubadp, 2, float64, f64, NMSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvnmsubmdp, 2, float64, f64, NMSUB_FLGS, 0, 0, 0)
-
-VSX_MADD(xvmaddasp, 4, float32, f32, MADD_FLGS, 1, 0, 0)
-VSX_MADD(xvmaddmsp, 4, float32, f32, MADD_FLGS, 0, 0, 0)
-VSX_MADD(xvmsubasp, 4, float32, f32, MSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvmsubmsp, 4, float32, f32, MSUB_FLGS, 0, 0, 0)
-VSX_MADD(xvnmaddasp, 4, float32, f32, NMADD_FLGS, 1, 0, 0)
-VSX_MADD(xvnmaddmsp, 4, float32, f32, NMADD_FLGS, 0, 0, 0)
-VSX_MADD(xvnmsubasp, 4, float32, f32, NMSUB_FLGS, 1, 0, 0)
-VSX_MADD(xvnmsubmsp, 4, float32, f32, NMSUB_FLGS, 0, 0, 0)
+VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
+VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
+VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
+VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
+VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
+VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
+VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
+VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)
+
+VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
+VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
+VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
+VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
+VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
+VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
+VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
+VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)
+
+VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
+VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
+VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
+VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
+VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
+VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)
+
+VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
+VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
+VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
+VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
+VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
+VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
 
 #define VSX_SCALAR_CMP(op, ordered)                                      \
 void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
@@ -2398,7 +2398,7 @@ VSX_SCALAR_CMP(xscmpudp, 0)
  *   op    - operation (max or min)
  *   nels  - number of elements (1, 2 or 4)
  *   tp    - type (float32 or float64)
- *   fld   - vsr_t field (f32 or f64)
+ *   fld   - vsr_t field (VsrD(*) or VsrW(*))
  */
 #define VSX_MAX_MIN(name, op, nels, tp, fld)                                  \
 void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
@@ -2411,9 +2411,9 @@ void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
     getVSR(xT(opcode), &xt, env);                                             \
                                                                               \
     for (i = 0; i < nels; i++) {                                              \
-        xt.fld[i] = tp##_##op(xa.fld[i], xb.fld[i], &env->fp_status);         \
-        if (unlikely(tp##_is_signaling_nan(xa.fld[i]) ||                      \
-                     tp##_is_signaling_nan(xb.fld[i]))) {                     \
+        xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status);                  \
+        if (unlikely(tp##_is_signaling_nan(xa.fld) ||                         \
+                     tp##_is_signaling_nan(xb.fld))) {                        \
             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
         }                                                                     \
     }                                                                         \
@@ -2422,18 +2422,18 @@ void helper_##name(CPUPPCState *env, uint32_t opcode)                         \
     helper_float_check_status(env);                                           \
 }
 
-VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, f64)
-VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, f64)
-VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, f32)
-VSX_MAX_MIN(xsmindp, minnum, 1, float64, f64)
-VSX_MAX_MIN(xvmindp, minnum, 2, float64, f64)
-VSX_MAX_MIN(xvminsp, minnum, 4, float32, f32)
+VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
+VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
+VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
+VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
+VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
+VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
 
 /* VSX_CMP - VSX floating point compare
  *   op    - instruction mnemonic
  *   nels  - number of elements (1, 2 or 4)
  *   tp    - type (float32 or float64)
- *   fld   - vsr_t field (f32 or f64)
+ *   fld   - vsr_t field (VsrD(*) or VsrW(*))
  *   cmp   - comparison operation
  *   svxvc - set VXVC bit
  */
@@ -2450,23 +2450,23 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
     getVSR(xT(opcode), &xt, env);                                         \
                                                                           \
     for (i = 0; i < nels; i++) {                                          \
-        if (unlikely(tp##_is_any_nan(xa.fld[i]) ||                        \
-                     tp##_is_any_nan(xb.fld[i]))) {                       \
-            if (tp##_is_signaling_nan(xa.fld[i]) ||                       \
-                tp##_is_signaling_nan(xb.fld[i])) {                       \
+        if (unlikely(tp##_is_any_nan(xa.fld) ||                           \
+                     tp##_is_any_nan(xb.fld))) {                          \
+            if (tp##_is_signaling_nan(xa.fld) ||                          \
+                tp##_is_signaling_nan(xb.fld)) {                          \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);    \
             }                                                             \
             if (svxvc) {                                                  \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);      \
             }                                                             \
-            xt.fld[i] = 0;                                                \
+            xt.fld = 0;                                                   \
             all_true = 0;                                                 \
         } else {                                                          \
-            if (tp##_##cmp(xb.fld[i], xa.fld[i], &env->fp_status) == 1) { \
-                xt.fld[i] = -1;                                           \
+            if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == 1) {       \
+                xt.fld = -1;                                              \
                 all_false = 0;                                            \
             } else {                                                      \
-                xt.fld[i] = 0;                                            \
+                xt.fld = 0;                                               \
                 all_true = 0;                                             \
             }                                                             \
         }                                                                 \
@@ -2479,12 +2479,12 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                       \
     helper_float_check_status(env);                                       \
  }
 
-VSX_CMP(xvcmpeqdp, 2, float64, f64, eq, 0)
-VSX_CMP(xvcmpgedp, 2, float64, f64, le, 1)
-VSX_CMP(xvcmpgtdp, 2, float64, f64, lt, 1)
-VSX_CMP(xvcmpeqsp, 4, float32, f32, eq, 0)
-VSX_CMP(xvcmpgesp, 4, float32, f32, le, 1)
-VSX_CMP(xvcmpgtsp, 4, float32, f32, lt, 1)
+VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0)
+VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1)
+VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1)
+VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0)
+VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1)
+VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1)
 
 #if defined(HOST_WORDS_BIGENDIAN)
 #define JOFFSET 0
@@ -2671,7 +2671,7 @@ VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, u32[j], f32[i], i, 0, 0)
  *   op    - instruction mnemonic
  *   nels  - number of elements (1, 2 or 4)
  *   tp    - type (float32 or float64)
- *   fld   - vsr_t field (f32 or f64)
+ *   fld   - vsr_t field (VsrD(*) or VsrW(*))
  *   rmode - rounding mode
  *   sfprf - set FPRF
  */
@@ -2688,14 +2688,14 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
     }                                                                  \
                                                                        \
     for (i = 0; i < nels; i++) {                                       \
-        if (unlikely(tp##_is_signaling_nan(xb.fld[i]))) {              \
+        if (unlikely(tp##_is_signaling_nan(xb.fld))) {                 \
             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);     \
-            xt.fld[i] = tp##_snan_to_qnan(xb.fld[i]);                  \
+            xt.fld = tp##_snan_to_qnan(xb.fld);                        \
         } else {                                                       \
-            xt.fld[i] = tp##_round_to_int(xb.fld[i], &env->fp_status); \
+            xt.fld = tp##_round_to_int(xb.fld, &env->fp_status);       \
         }                                                              \
         if (sfprf) {                                                   \
-            helper_compute_fprf(env, xt.fld[i], sfprf);                \
+            helper_compute_fprf(env, xt.fld, sfprf);                   \
         }                                                              \
     }                                                                  \
                                                                        \
@@ -2711,23 +2711,23 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                    \
     helper_float_check_status(env);                                    \
 }
 
-VSX_ROUND(xsrdpi, 1, float64, f64, float_round_nearest_even, 1)
-VSX_ROUND(xsrdpic, 1, float64, f64, FLOAT_ROUND_CURRENT, 1)
-VSX_ROUND(xsrdpim, 1, float64, f64, float_round_down, 1)
-VSX_ROUND(xsrdpip, 1, float64, f64, float_round_up, 1)
-VSX_ROUND(xsrdpiz, 1, float64, f64, float_round_to_zero, 1)
+VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_nearest_even, 1)
+VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
+VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
+VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
+VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
 
-VSX_ROUND(xvrdpi, 2, float64, f64, float_round_nearest_even, 0)
-VSX_ROUND(xvrdpic, 2, float64, f64, FLOAT_ROUND_CURRENT, 0)
-VSX_ROUND(xvrdpim, 2, float64, f64, float_round_down, 0)
-VSX_ROUND(xvrdpip, 2, float64, f64, float_round_up, 0)
-VSX_ROUND(xvrdpiz, 2, float64, f64, float_round_to_zero, 0)
+VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_nearest_even, 0)
+VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
+VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
+VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
+VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
 
-VSX_ROUND(xvrspi, 4, float32, f32, float_round_nearest_even, 0)
-VSX_ROUND(xvrspic, 4, float32, f32, FLOAT_ROUND_CURRENT, 0)
-VSX_ROUND(xvrspim, 4, float32, f32, float_round_down, 0)
-VSX_ROUND(xvrspip, 4, float32, f32, float_round_up, 0)
-VSX_ROUND(xvrspiz, 4, float32, f32, float_round_to_zero, 0)
+VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_nearest_even, 0)
+VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
+VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
+VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
+VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
 
 uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
 {
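
The helpers above are all stamped out by token-pasting macros such as VSX_SQRT and VSX_MADD, with the element field expression passed in as a macro argument so that scalar forms use doubleword 0 and vector forms use the loop index. A stripped-down sketch of the same generator pattern follows; the names and the arithmetic are illustrative only, not QEMU's helpers.

#include <math.h>
#include <stdio.h>

/* Toy version of the VSX_* generator macros above: one macro expands
 * into a helper per element count/type, and the field expression
 * ("fld") is substituted into the loop body, just as VsrD(0), VsrD(i)
 * and VsrW(i) are substituted in the patch. */
#define GEN_SQRT(name, nels, tp, fld)              \
static void name(tp *xt, const tp *xb)             \
{                                                  \
    for (int i = 0; i < (nels); i++) {             \
        xt[fld] = (tp)sqrt((double)xb[fld]);       \
    }                                              \
}

GEN_SQRT(toy_xssqrtdp, 1, double, 0)   /* scalar: only doubleword 0 */
GEN_SQRT(toy_xvsqrtdp, 2, double, i)   /* vector: both doublewords  */

int main(void)
{
    double in[2] = { 4.0, 9.0 }, out[2] = { 0.0, 0.0 };

    toy_xssqrtdp(out, in);
    printf("scalar: %g %g\n", out[0], out[1]);   /* 2 0 */

    toy_xvsqrtdp(out, in);
    printf("vector: %g %g\n", out[0], out[1]);   /* 2 3 */
    return 0;
}
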
commit d359db00e6dfaf12cbdedd30e36f879110d4f9d1
Author: Tom Musta <tommusta at gmail.com>
Date:   Mon Mar 31 16:03:58 2014 -0500

    target-ppc: Correct LE Host Inversion of Lower VSRs
    
    This change properly orders the doublewords of the VSRs 0-31.  Because these
    registers are constructed from separate doublewords, they must be inverted
    on Little Endian hosts.  The inversion is performed both when the VSR is read
    and when it is written.
    
    Signed-off-by: Tom Musta <tommusta at gmail.com>
    Tested-by: Tom Musta <tommusta at gmail.com>
    Signed-off-by: Alexander Graf <agraf at suse.de>

diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index d79aae9..9fc7dd8 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -1793,8 +1793,8 @@ typedef union _ppc_vsr_t {
 static void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
 {
     if (n < 32) {
-        vsr->f64[0] = env->fpr[n];
-        vsr->u64[1] = env->vsr[n];
+        vsr->VsrD(0) = env->fpr[n];
+        vsr->VsrD(1) = env->vsr[n];
     } else {
         vsr->u64[0] = env->avr[n-32].u64[0];
         vsr->u64[1] = env->avr[n-32].u64[1];
@@ -1804,8 +1804,8 @@ static void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
 static void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
 {
     if (n < 32) {
-        env->fpr[n] = vsr->f64[0];
-        env->vsr[n] = vsr->u64[1];
+        env->fpr[n] = vsr->VsrD(0);
+        env->vsr[n] = vsr->VsrD(1);
     } else {
         env->avr[n-32].u64[0] = vsr->u64[0];
         env->avr[n-32].u64[1] = vsr->u64[1];
commit 80189035de73f30e42a7f933c45cccfc4b0c56e9
Author: Tom Musta <tommusta at gmail.com>
Date:   Mon Mar 31 16:03:57 2014 -0500

    target-ppc: Define Endian-Correct Accessors for VSR Field Access
    
    This change defines accessors for VSR doubleword and word fields that
    are correct from a host endian perspective.  This allows helper code to
    use the Power ISA indexing numbers directly.
    
    For example, the xscvdpsxws instruction has a target VSR that looks
    like this:
    
      0           32       64                    127
      +-----------+--------+-----------+-----------+
      | undefined | SW     | undefined | undefined |
      +-----------+--------+-----------+-----------+
    
    VSX helper code will use VsrW(1) to access this field.
    
    Signed-off-by: Tom Musta <tommusta at gmail.com>
    Tested-by: Tom Musta <tommusta at gmail.com>
    Signed-off-by: Alexander Graf <agraf at suse.de>

diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index 691d572..d79aae9 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -1782,6 +1782,14 @@ typedef union _ppc_vsr_t {
     float64 f64[2];
 } ppc_vsr_t;
 
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VsrW(i) u32[i]
+#define VsrD(i) u64[i]
+#else
+#define VsrW(i) u32[3-(i)]
+#define VsrD(i) u64[1-(i)]
+#endif
+
 static void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
 {
     if (n < 32) {
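
For readers unfamiliar with the accessor trick, here is a small standalone sketch of what VsrD()/VsrW() buy you. The union and the endian test are modelled on the definitions in the diff above, but this is only an illustration, not QEMU code: the Power ISA numbers doubleword 0 as the most significant half of the 128-bit VSR, so on a little-endian host it lives in the higher-indexed array element and the accessor mirrors the index.

#include <stdint.h>
#include <stdio.h>

/* Modelled on ppc_vsr_t from fpu_helper.c: a 128-bit register viewed
 * as two doublewords or four words. */
typedef union {
    uint64_t u64[2];
    uint32_t u32[4];
} vsr_demo_t;

/* Same mapping as the patch; DEMO_BIG_ENDIAN stands in for QEMU's
 * HOST_WORDS_BIGENDIAN.  On a big-endian host the array index matches
 * the ISA numbering, on a little-endian host it is mirrored. */
#if defined(DEMO_BIG_ENDIAN)
#define VsrD(i) u64[i]
#define VsrW(i) u32[i]
#else
#define VsrD(i) u64[1 - (i)]
#define VsrW(i) u32[3 - (i)]
#endif

int main(void)
{
    vsr_demo_t v = { .u64 = { 0, 0 } };

    v.VsrD(0) = 0x1122334455667788ULL;          /* ISA doubleword 0 */

    /* ISA doubleword 0 lands in u64[1] on a little-endian host and
     * u64[0] on a big-endian one; ISA word 1 (the SW field in the
     * xscvdpsxws example above) lands in u32[2] resp. u32[1]. */
    printf("VsrD(0) is u64[%d], VsrW(1) is u32[%d]\n",
           (int)(&v.VsrD(0) - v.u64), (int)(&v.VsrW(1) - v.u32));
    return 0;
}
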
commit 0453099b7d20c9fc2946ed74f1d965ae4d173d19
Author: Tom Musta <tommusta at gmail.com>
Date:   Mon Mar 31 16:03:56 2014 -0500

    target-ppc: Bug: VSX Convert to Integer Should Truncate
    
    The various VSX Convert to Integer instructions should truncate the
    floating point number to an integer value, which is equivalent to
    a round-to-zero rounding mode.  The existing VSX floating point to
    integer conversion helpers are erroneously using the rounding mode set
    in the PowerPC Floating Point Status and Control Register (FPSCR).
    This change corrects this defect by using the appropriate
    float*_to_*_round_to_zero() routines from the softfloat library.
    
    Signed-off-by: Tom Musta <tommusta at gmail.com>
    Tested-by: Tom Musta <tommusta at gmail.com>
    Signed-off-by: Alexander Graf <agraf at suse.de>

diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index fd91239..691d572 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -2568,7 +2568,8 @@ void helper_##op(CPUPPCState *env, uint32_t opcode)                          \
             fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);            \
             xt.tfld = rnan;                                                  \
         } else {                                                             \
-            xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status);              \
+            xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld,                \
+                          &env->fp_status);                                  \
             if (env->fp_status.float_exception_flags & float_flag_invalid) { \
                 fload_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0);        \
             }                                                                \
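
The distinction matters whenever the guest's FPSCR rounding mode is not round-to-zero. A tiny host-C analogy (this uses the C library, not the softfloat API, purely to show the difference in results): a cast truncates toward zero, which is what convert-to-integer should do, while llrint() honours the current rounding mode, which is what the old helpers were effectively doing.

#include <fenv.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = 2.7;

    fesetround(FE_TONEAREST);   /* like an FPSCR set to round-to-nearest */

    /* A C cast always truncates toward zero. */
    printf("truncate        : %lld\n", (long long)x);   /* 2 */

    /* llrint() follows the current rounding mode. */
    printf("current rounding: %lld\n", llrint(x));       /* 3 */
    return 0;
}
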
commit a13d44896854329581ba48607d66c6b2aec157f7
Author: Tom Musta <tommusta at gmail.com>
Date:   Mon Mar 31 16:03:55 2014 -0500

    softfloat: Introduce float32_to_uint64_round_to_zero
    
    This change adds the float32_to_uint64_round_to_zero function to the softfloat
    library.  This function completes the set of float32-to-INT round-to-zero
    conversion routines, where INT is {int32_t, uint32_t, int64_t, uint64_t}.
    
    This contribution can be licensed under either the softfloat-2a or -2b
    license.
    
    Signed-off-by: Tom Musta <tommusta at gmail.com>
    Tested-by: Tom Musta <tommusta at gmail.com>
    Reviewed-by: Peter Maydell <peter.maydell at linaro.org>
    Signed-off-by: Alexander Graf <agraf at suse.de>

diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 5f02c16..e00a6fb 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -1628,6 +1628,26 @@ uint64 float32_to_uint64(float32 a STATUS_PARAM)
 
 /*----------------------------------------------------------------------------
 | Returns the result of converting the single-precision floating-point value
+| `a' to the 64-bit unsigned integer format.  The conversion is
+| performed according to the IEC/IEEE Standard for Binary Floating-Point
+| Arithmetic, except that the conversion is always rounded toward zero.  If
+| `a' is a NaN, the largest unsigned integer is returned.  Otherwise, if the
+| conversion overflows, the largest unsigned integer is returned.  If the
+| 'a' is negative, the result is rounded and zero is returned; values that do
+| not round to zero will raise the inexact flag.
+*----------------------------------------------------------------------------*/
+
+uint64 float32_to_uint64_round_to_zero(float32 a STATUS_PARAM)
+{
+    signed char current_rounding_mode = STATUS(float_rounding_mode);
+    set_float_rounding_mode(float_round_to_zero STATUS_VAR);
+    int64_t v = float32_to_uint64(a STATUS_VAR);
+    set_float_rounding_mode(current_rounding_mode STATUS_VAR);
+    return v;
+}
+
+/*----------------------------------------------------------------------------
+| Returns the result of converting the single-precision floating-point value
 | `a' to the 64-bit two's complement integer format.  The conversion is
 | performed according to the IEC/IEEE Standard for Binary Floating-Point
 | Arithmetic, except that the conversion is always rounded toward zero.  If
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index db878c1..4b3090c 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -342,6 +342,7 @@ uint32 float32_to_uint32( float32 STATUS_PARAM );
 uint32 float32_to_uint32_round_to_zero( float32 STATUS_PARAM );
 int64 float32_to_int64( float32 STATUS_PARAM );
 uint64 float32_to_uint64(float32 STATUS_PARAM);
+uint64 float32_to_uint64_round_to_zero(float32 STATUS_PARAM);
 int64 float32_to_int64_round_to_zero( float32 STATUS_PARAM );
 float64 float32_to_float64( float32 STATUS_PARAM );
 floatx80 float32_to_floatx80( float32 STATUS_PARAM );
commit 3636226ae45a9b04af5202a18f445680c88473e7
Author: Alexey Kardashevskiy <aik at ozlabs.ru>
Date:   Fri Apr 4 11:57:35 2014 +1100

    pseries: Update SLOF firmware image to qemu-slof-20140404
    
    The change log is:
      > Isolate sc 1 detection logic
      > build: auto-detect ppc64 architecture
      > cas: increase hcall buffer size to accommodate 256 cpus
      > usb: change device tree naming
      > usb-core: adjust port numbers in set_address
      > virtio-scsi: correct srplun comment
      > Fix kernel loading
      > Workaround to make grub2 assign server ip from dhcp ack packet only
      > ELF: Enter LE binary in LE mode
      > ELF loading should fail for virt != phys
    
    Signed-off-by: Alexey Kardashevskiy <aik at ozlabs.ru>
    Signed-off-by: Alexander Graf <agraf at suse.de>

diff --git a/pc-bios/README b/pc-bios/README
index ef6008d..4381718 100644
--- a/pc-bios/README
+++ b/pc-bios/README
@@ -17,7 +17,7 @@
 - SLOF (Slimline Open Firmware) is a free IEEE 1275 Open Firmware
   implementation for certain IBM POWER hardware.  The sources are at
   https://github.com/aik/SLOF, and the image currently in qemu is
-  built from git tag qemu-slof-20140304.
+  built from git tag qemu-slof-20140404.
 
 - sgabios (the Serial Graphics Adapter option ROM) provides a means for
   legacy x86 software to communicate with an attached serial console as
diff --git a/pc-bios/slof.bin b/pc-bios/slof.bin
index a742bff..972e012 100644
Binary files a/pc-bios/slof.bin and b/pc-bios/slof.bin differ
diff --git a/roms/SLOF b/roms/SLOF
index af6b7bf..c90b50b 160000
--- a/roms/SLOF
+++ b/roms/SLOF
@@ -1 +1 @@
-Subproject commit af6b7bf5879b6cd6825de2a107cb0e3219fb1df5
+Subproject commit c90b50b5055f976a0da3c032f26fb80157292adc
commit 6a450df9b8369c0cff7a1d6774d56f0862abd4e3
Author: Alexander Graf <agraf at suse.de>
Date:   Thu Apr 3 20:45:27 2014 +0200

    PPC: E500: Set PIR default reset value rather than SPR value
    
    We now reset SPRs to their reset values on CPU reset. So if we want
    an SPR change to persist, we need to change its default reset value
    rather than manually changing the value itself.
    
    Do this for SPR_BOOKE_PIR, fixing e500v2 SMP boot.
    
    Reported-by: Frederic Konrad <fred.konrad at greensocs.com>
    Signed-off-by: Alexander Graf <agraf at suse.de>
    Tested-by: KONRAD Frederic <fred.konrad at greensocs.com>

diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c
index d7ba25f..f984b3e 100644
--- a/hw/ppc/e500.c
+++ b/hw/ppc/e500.c
@@ -649,7 +649,7 @@ void ppce500_init(QEMUMachineInitArgs *args, PPCE500Params *params)
         input = (qemu_irq *)env->irq_inputs;
         irqs[i][OPENPIC_OUTPUT_INT] = input[PPCE500_INPUT_INT];
         irqs[i][OPENPIC_OUTPUT_CINT] = input[PPCE500_INPUT_CINT];
-        env->spr[SPR_BOOKE_PIR] = cs->cpu_index = i;
+        env->spr_cb[SPR_BOOKE_PIR].default_value = cs->cpu_index = i;
         env->mpic_iack = MPC8544_CCSRBAR_BASE +
                          MPC8544_MPIC_REGS_OFFSET + 0xa0;
 
commit 9854202b57e0ac197cf2bee3d7fbf79ba58f16c5
Author: Tomoki Sekiyama <tomoki.sekiyama at hds.com>
Date:   Wed Mar 26 14:28:51 2014 -0400

    vss-win32: Fix build with mingw64-headers-3.1.0
    
    In mingw64-headers-3.1.0, a definition of _com_issue_error() is added, which
    conflicts with the definition in install.cpp. This adds a version check for
    the mingw headers that disables the local definition when headers >= 3.1 are used.
    
    Signed-off-by: Tomoki Sekiyama <tomoki.sekiyama at hds.com>
    Signed-off-by: Michael Roth <mdroth at linux.vnet.ibm.com>

diff --git a/qga/vss-win32/install.cpp b/qga/vss-win32/install.cpp
index b791a6c..b0e4426 100644
--- a/qga/vss-win32/install.cpp
+++ b/qga/vss-win32/install.cpp
@@ -75,10 +75,13 @@ static void errmsg_dialog(DWORD err, const char *text, const char *opt = "")
 
 #define chk(status) _chk(hr, status, "Failed to " #status, out)
 
+#if !defined(__MINGW64_VERSION_MAJOR) || !defined(__MINGW64_VERSION_MINOR) || \
+    __MINGW64_VERSION_MAJOR * 100 + __MINGW64_VERSION_MINOR < 301
 void __stdcall _com_issue_error(HRESULT hr)
 {
     errmsg(hr, "Unexpected error in COM");
 }
+#endif
 
 template<class T>
 HRESULT put_Value(ICatalogObject *pObj, LPCWSTR name, T val)
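
The guard relies on encoding the header version as major * 100 + minor, so mingw-w64 3.1 becomes 301 and the local stub is compiled only for older headers (or when the version macros are absent). A quick illustration of the encoding, with made-up values standing in for __MINGW64_VERSION_MAJOR/MINOR:

#include <stdio.h>

#define DEMO_VERSION_MAJOR 3
#define DEMO_VERSION_MINOR 1

int main(void)
{
    int encoded = DEMO_VERSION_MAJOR * 100 + DEMO_VERSION_MINOR;

    /* Headers >= 3.1 already define _com_issue_error(), so the local
     * stub must be skipped for them. */
    printf("encoded=%d, need local stub: %s\n",
           encoded, encoded < 301 ? "yes" : "no");
    return 0;
}
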
commit 577a67234dd7bef8b0443804f3a81977072f8657
Author: Tomoki Sekiyama <tomoki.sekiyama at hds.com>
Date:   Wed Mar 26 14:28:45 2014 -0400

    Makefile: add qga-vss-dll-obj-y to nested variables
    
    The build rule for qga/vss-win32/qga-vss.dll is broken by commit
    ba1183da9a10b94611cad88c44a5c6df005f9b55, because it omits
    qga-vss-dll-obj-y from the list of nested variables.
    This fixes the build of qga-vss.dll by adding qga-vss-dll-obj-y to the list.
    
    Signed-off-by: Tomoki Sekiyama <tomoki.sekiyama at hds.com>
    Signed-off-by: Michael Roth <mdroth at linux.vnet.ibm.com>

diff --git a/Makefile b/Makefile
index d622799..423e373 100644
--- a/Makefile
+++ b/Makefile
@@ -133,6 +133,7 @@ dummy := $(call unnest-vars,, \
                 stub-obj-y \
                 util-obj-y \
                 qga-obj-y \
+                qga-vss-dll-obj-y \
                 block-obj-y \
                 block-obj-m \
                 common-obj-y \

