[OE-core] [for-denzil] eglibc/gcc: add patches to fix eglibc 2.15 build
McClintock Matthew-B29882
B29882 at freescale.com
Thu Aug 23 04:05:25 UTC 2012
On Wed, Aug 22, 2012 at 11:02 PM, Matthew McClintock <msm at freescale.com> wrote:
> This drops one patch against eglibc for 2.15 and adds two new ones,
> also it adds a gcc patch. We use all of these internally and they
> are tested quite well.
>
> Signed-off-by: Matthew McClintock <msm at freescale.com>
FYI, this patch is going straight to denzil because eglibc just got
removed from master ;)
-M
> ---
> .../eglibc/eglibc-2.15/glibc.no_sqrt.patch | 15 +
> meta/recipes-core/eglibc/eglibc_2.15.bb | 3 +-
> meta/recipes-devtools/gcc/gcc-4.6.inc | 5 +-
> .../gcc/gcc-4.6/gcc.e6500-FSF46.patch | 4015 ++++++++++++++++++++
> .../gcc/gcc-4.6/gcc.no_power_builtins.patch | 30 +
> .../gcc/gcc-4.6/powerpc-e5500.patch | 465 ---
> 6 files changed, 4065 insertions(+), 468 deletions(-)
> create mode 100644 meta/recipes-core/eglibc/eglibc-2.15/glibc.no_sqrt.patch
> create mode 100644 meta/recipes-devtools/gcc/gcc-4.6/gcc.e6500-FSF46.patch
> create mode 100644 meta/recipes-devtools/gcc/gcc-4.6/gcc.no_power_builtins.patch
> delete mode 100644 meta/recipes-devtools/gcc/gcc-4.6/powerpc-e5500.patch
>
> diff --git a/meta/recipes-core/eglibc/eglibc-2.15/glibc.no_sqrt.patch b/meta/recipes-core/eglibc/eglibc-2.15/glibc.no_sqrt.patch
> new file mode 100644
> index 0000000..7f5136e
> --- /dev/null
> +++ b/meta/recipes-core/eglibc/eglibc-2.15/glibc.no_sqrt.patch
> @@ -0,0 +1,15 @@
> +Upstream-Status: Pending
> +
> +People are working to include these fixes upstream
> +
> +--- libc/sysdeps/powerpc/fpu/math_private.h-orig 2012-07-10 13:23:19.373254002 -0500
> ++++ libc/sysdeps/powerpc/fpu/math_private.h 2012-07-10 13:23:46.058254001 -0500
> +@@ -27,7 +27,7 @@
> +
> + #include <math/math_private.h>
> +
> +-# if __WORDSIZE == 64 || defined _ARCH_PWR4
> ++# if 0
> + # define __CPU_HAS_FSQRT 1
> +
> + #ifndef __ieee754_sqrt
> diff --git a/meta/recipes-core/eglibc/eglibc_2.15.bb b/meta/recipes-core/eglibc/eglibc_2.15.bb
> index f5219d1..dc2a546 100644
> --- a/meta/recipes-core/eglibc/eglibc_2.15.bb
> +++ b/meta/recipes-core/eglibc/eglibc_2.15.bb
> @@ -3,7 +3,7 @@ require eglibc.inc
> SRCREV = "17386"
>
> DEPENDS += "gperf-native"
> -PR = "r7"
> +PR = "r8"
> PR_append = "+svnr${SRCPV}"
>
> EGLIBC_BRANCH="eglibc-2_15"
> @@ -23,6 +23,7 @@ SRC_URI = "svn://www.eglibc.org/svn/branches/;module=${EGLIBC_BRANCH};proto=http
> file://ppc-sqrt_finite.patch \
> file://GLRO_dl_debug_mask.patch \
> file://initgroups_keys.patch \
> + file://glibc.no_sqrt.patch \
> "
> LIC_FILES_CHKSUM = "file://LICENSES;md5=98a1128c4b58120182cbea3b1752d8b9 \
> file://COPYING;md5=393a5ca445f6965873eca0259a17f833 \
> diff --git a/meta/recipes-devtools/gcc/gcc-4.6.inc b/meta/recipes-devtools/gcc/gcc-4.6.inc
> index 4c6fe28..c9529f0 100644
> --- a/meta/recipes-devtools/gcc/gcc-4.6.inc
> +++ b/meta/recipes-devtools/gcc/gcc-4.6.inc
> @@ -1,6 +1,6 @@
> require gcc-common.inc
>
> -PR = "r28"
> +PR = "r29"
>
> # Third digit in PV should be incremented after a minor release
> # happens from this branch on gcc e.g. currently its 4.6.0
> @@ -64,7 +64,6 @@ SRC_URI = "svn://gcc.gnu.org/svn/gcc/branches;module=${BRANCH};proto=http \
> file://disable_relax_pic_calls_flag.patch \
> file://COLLECT_GCC_OPTIONS.patch \
> file://use-defaults.h-and-t-oe-in-B.patch \
> - file://powerpc-e5500.patch \
> file://fix-for-ice-50099.patch \
> file://gcc-with-linker-hash-style.patch \
> file://pr46934.patch \
> @@ -74,6 +73,8 @@ SRC_URI = "svn://gcc.gnu.org/svn/gcc/branches;module=${BRANCH};proto=http \
> file://GPLUSPLUS_INCLUDE_DIR_with_sysroot.patch \
> file://fortran-cross-compile-hack.patch \
> file://cpp-honour-sysroot.patch \
> + file://gcc.e6500-FSF46.patch \
> + file://gcc.no_power_builtins.patch \
> "
>
> SRC_URI_append_sh3 = " file://sh3-installfix-fixheaders.patch "
> diff --git a/meta/recipes-devtools/gcc/gcc-4.6/gcc.e6500-FSF46.patch b/meta/recipes-devtools/gcc/gcc-4.6/gcc.e6500-FSF46.patch
> new file mode 100644
> index 0000000..2de5f77
> --- /dev/null
> +++ b/meta/recipes-devtools/gcc/gcc-4.6/gcc.e6500-FSF46.patch
> @@ -0,0 +1,4015 @@
> +Upstream-Status: Pending
> +
> +People are working to include these fixes upstream
> +
> +diff -ruN gcc-4.6.2-orig/gcc/config/rs6000/altivec.h gcc-4.6.2/gcc/config/rs6000/altivec.h
> +--- gcc-4.6.2-orig/gcc/config/rs6000/altivec.h 2011-02-02 23:42:19.000000000 -0600
> ++++ gcc-4.6.2/gcc/config/rs6000/altivec.h 2012-03-06 12:33:43.943038996 -0600
> +@@ -322,6 +322,30 @@
> + #define vec_vsx_st __builtin_vec_vsx_st
> + #endif
> +
> ++#ifdef __ALTIVEC2__
> ++/* New Altivec instructions */
> ++#define vec_absd __builtin_vec_absd
> ++#define vec_lvexbx __builtin_vec_lvexbx
> ++#define vec_lvexhx __builtin_vec_lvexhx
> ++#define vec_lvexwx __builtin_vec_lvexwx
> ++#define vec_stvexbx __builtin_vec_stvexbx
> ++#define vec_stvexhx __builtin_vec_stvexhx
> ++#define vec_stvexwx __builtin_vec_stvexwx
> ++#define vec_lvswx __builtin_vec_lvswx
> ++#define vec_lvswxl __builtin_vec_lvswxl
> ++#define vec_stvswx __builtin_vec_stvswx
> ++#define vec_stvswxl __builtin_vec_stvswxl
> ++#define vec_lvsm __builtin_vec_lvsm
> ++#define vec_lvtlx __builtin_vec_lvtlx
> ++#define vec_lvtlxl __builtin_vec_lvtlxl
> ++#define vec_lvtrx __builtin_vec_lvtrx
> ++#define vec_lvtrxl __builtin_vec_lvtrxl
> ++#define vec_stvflx __builtin_vec_stvflx
> ++#define vec_stvflxl __builtin_vec_stvflxl
> ++#define vec_stvfrx __builtin_vec_stvfrx
> ++#define vec_stvfrxl __builtin_vec_stvfrxl
> ++#endif
> ++
> + /* Predicates.
> + For C++, we use templates in order to allow non-parenthesized arguments.
> + For C, instead, we use macros since non-parenthesized arguments were
> +diff -ruN gcc-4.6.2-orig/gcc/config/rs6000/altivec.md gcc-4.6.2/gcc/config/rs6000/altivec.md
> +--- gcc-4.6.2-orig/gcc/config/rs6000/altivec.md 2011-07-08 15:10:18.000000000 -0500
> ++++ gcc-4.6.2/gcc/config/rs6000/altivec.md 2012-03-06 12:24:35.058038999 -0600
> +@@ -91,9 +91,11 @@
> + (UNSPEC_LVSL 194)
> + (UNSPEC_LVSR 195)
> + (UNSPEC_LVE 196)
> ++ (UNSPEC_LVEX 197)
> + (UNSPEC_STVX 201)
> + (UNSPEC_STVXL 202)
> + (UNSPEC_STVE 203)
> ++ (UNSPEC_STVEX 204)
> + (UNSPEC_SET_VSCR 213)
> + (UNSPEC_GET_VRSAVE 214)
> + (UNSPEC_LVX 215)
> +@@ -123,6 +125,19 @@
> + (UNSPEC_STVLXL 241)
> + (UNSPEC_STVRX 242)
> + (UNSPEC_STVRXL 243)
> ++ (UNSPEC_LVTLX 244)
> ++ (UNSPEC_LVTLXL 245)
> ++ (UNSPEC_LVTRX 246)
> ++ (UNSPEC_LVTRXL 247)
> ++ (UNSPEC_STVFLX 248)
> ++ (UNSPEC_STVFLXL 249)
> ++ (UNSPEC_STVFRX 250)
> ++ (UNSPEC_STVFRXL 251)
> ++ (UNSPEC_LVSWX 252)
> ++ (UNSPEC_LVSWXL 253)
> ++ (UNSPEC_LVSM 254)
> ++ (UNSPEC_STVSWX 255)
> ++ (UNSPEC_STVSWXL 256)
> + (UNSPEC_VMULWHUB 308)
> + (UNSPEC_VMULWLUB 309)
> + (UNSPEC_VMULWHSB 310)
> +@@ -143,6 +158,9 @@
> + (UNSPEC_VUPKLS_V4SF 325)
> + (UNSPEC_VUPKHU_V4SF 326)
> + (UNSPEC_VUPKLU_V4SF 327)
> ++ (UNSPEC_VABSDUB 328)
> ++ (UNSPEC_VABSDUH 329)
> ++ (UNSPEC_VABSDUW 330)
> + ])
> +
> + (define_constants
> +@@ -323,6 +341,34 @@
> +
> + ;; Simple binary operations.
> +
> ++;; absd
> ++(define_insn "altivec_vabsduw"
> ++ [(set (match_operand:V4SI 0 "register_operand" "=v")
> ++ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
> ++ (match_operand:V4SI 2 "register_operand" "v")]
> ++ UNSPEC_VABSDUW))]
> ++ "TARGET_ALTIVEC2"
> ++ "vabsduw %0,%1,%2"
> ++ [(set_attr "type" "vecsimple")])
> ++
> ++(define_insn "altivec_vabsduh"
> ++ [(set (match_operand:V8HI 0 "register_operand" "=v")
> ++ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
> ++ (match_operand:V8HI 2 "register_operand" "v")]
> ++ UNSPEC_VABSDUH))]
> ++ "TARGET_ALTIVEC2"
> ++ "vabsduh %0,%1,%2"
> ++ [(set_attr "type" "vecsimple")])
> ++
> ++(define_insn "altivec_vabsdub"
> ++ [(set (match_operand:V16QI 0 "register_operand" "=v")
> ++ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
> ++ (match_operand:V16QI 2 "register_operand" "v")]
> ++ UNSPEC_VABSDUB))]
> ++ "TARGET_ALTIVEC2"
> ++ "vabsdub %0,%1,%2"
> ++ [(set_attr "type" "vecsimple")])
> ++
> + ;; add
> + (define_insn "add<mode>3"
> + [(set (match_operand:VI 0 "register_operand" "=v")
> +@@ -1741,6 +1787,15 @@
> + "lvewx %0,%y1"
> + [(set_attr "type" "vecload")])
> +
> ++(define_insn "altivec_lvex<VI_char>x"
> ++ [(parallel
> ++ [(set (match_operand:VI 0 "register_operand" "=v")
> ++ (match_operand:VI 1 "memory_operand" "Z"))
> ++ (unspec [(const_int 0)] UNSPEC_LVEX)])]
> ++ "TARGET_ALTIVEC2"
> ++ "lvex<VI_char>x %0,%y1"
> ++ [(set_attr "type" "vecload")])
> ++
> + (define_insn "altivec_lvxl"
> + [(parallel
> + [(set (match_operand:V4SI 0 "register_operand" "=v")
> +@@ -1791,6 +1846,13 @@
> + "stvewx %1,%y0"
> + [(set_attr "type" "vecstore")])
> +
> ++(define_insn "altivec_stvex<VI_char>x"
> ++ [(set (match_operand:<VI_scalar> 0 "memory_operand" "=Z")
> ++ (unspec:<VI_scalar> [(match_operand:VI 1 "register_operand" "v")] UNSPEC_STVEX))]
> ++ "TARGET_ALTIVEC2"
> ++ "stvex<VI_char>x %1,%y0"
> ++ [(set_attr "type" "vecstore")])
> ++
> + ;; Generate
> + ;; vspltis? SCRATCH0,0
> + ;; vsubu?m SCRATCH2,SCRATCH1,%1
> +@@ -2358,7 +2420,7 @@
> + DONE;
> + }")
> +
> +-;; Vector SIMD PEM v2.06c defines LVLX, LVLXL, LVRX, LVRXL,
> ++;; Vector SIMD PEM v2.06c defines LVLX, LVLXL, LVRX1, LVRXL,
> + ;; STVLX, STVLXL, STVVRX, STVRXL are available only on Cell.
> + (define_insn "altivec_lvlx"
> + [(set (match_operand:V16QI 0 "register_operand" "=v")
> +@@ -2394,8 +2456,8 @@
> +
> + (define_insn "altivec_stvlx"
> + [(parallel
> +- [(set (match_operand:V4SI 0 "memory_operand" "=Z")
> +- (match_operand:V4SI 1 "register_operand" "v"))
> ++ [(set (match_operand:V16QI 0 "memory_operand" "=Z")
> ++ (match_operand:V16QI 1 "register_operand" "v"))
> + (unspec [(const_int 0)] UNSPEC_STVLX)])]
> + "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
> + "stvlx %1,%y0"
> +@@ -2403,8 +2465,8 @@
> +
> + (define_insn "altivec_stvlxl"
> + [(parallel
> +- [(set (match_operand:V4SI 0 "memory_operand" "=Z")
> +- (match_operand:V4SI 1 "register_operand" "v"))
> ++ [(set (match_operand:V16QI 0 "memory_operand" "=Z")
> ++ (match_operand:V16QI 1 "register_operand" "v"))
> + (unspec [(const_int 0)] UNSPEC_STVLXL)])]
> + "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
> + "stvlxl %1,%y0"
> +@@ -2412,8 +2474,8 @@
> +
> + (define_insn "altivec_stvrx"
> + [(parallel
> +- [(set (match_operand:V4SI 0 "memory_operand" "=Z")
> +- (match_operand:V4SI 1 "register_operand" "v"))
> ++ [(set (match_operand:V16QI 0 "memory_operand" "=Z")
> ++ (match_operand:V16QI 1 "register_operand" "v"))
> + (unspec [(const_int 0)] UNSPEC_STVRX)])]
> + "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
> + "stvrx %1,%y0"
> +@@ -2421,13 +2483,123 @@
> +
> + (define_insn "altivec_stvrxl"
> + [(parallel
> +- [(set (match_operand:V4SI 0 "memory_operand" "=Z")
> +- (match_operand:V4SI 1 "register_operand" "v"))
> ++ [(set (match_operand:V16QI 0 "memory_operand" "=Z")
> ++ (match_operand:V16QI 1 "register_operand" "v"))
> + (unspec [(const_int 0)] UNSPEC_STVRXL)])]
> + "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
> + "stvrxl %1,%y0"
> + [(set_attr "type" "vecstore")])
> +
> ++(define_insn "altivec_lvtlx"
> ++ [(set (match_operand:V16QI 0 "register_operand" "=v")
> ++ (unspec:V16QI [(match_operand 1 "memory_operand" "Z")]
> ++ UNSPEC_LVTLX))]
> ++ "TARGET_ALTIVEC2"
> ++ "lvtlx %0,%y1"
> ++ [(set_attr "type" "vecload")])
> ++
> ++(define_insn "altivec_lvtlxl"
> ++ [(set (match_operand:V16QI 0 "register_operand" "=v")
> ++ (unspec:V16QI [(match_operand 1 "memory_operand" "Z")]
> ++ UNSPEC_LVTLXL))]
> ++ "TARGET_ALTIVEC2"
> ++ "lvtlxl %0,%y1"
> ++ [(set_attr "type" "vecload")])
> ++
> ++(define_insn "altivec_lvtrx"
> ++ [(set (match_operand:V16QI 0 "register_operand" "=v")
> ++ (unspec:V16QI [(match_operand 1 "memory_operand" "Z")]
> ++ UNSPEC_LVTRX))]
> ++ "TARGET_ALTIVEC2"
> ++ "lvtrx %0,%y1"
> ++ [(set_attr "type" "vecload")])
> ++
> ++(define_insn "altivec_lvtrxl"
> ++ [(set (match_operand:V16QI 0 "register_operand" "=v")
> ++ (unspec:V16QI [(match_operand 1 "memory_operand" "Z")]
> ++ UNSPEC_LVTRXL))]
> ++ "TARGET_ALTIVEC2"
> ++ "lvtrxl %0,%y1"
> ++ [(set_attr "type" "vecload")])
> ++
> ++(define_insn "altivec_stvflx"
> ++ [(parallel
> ++ [(set (match_operand:V16QI 0 "memory_operand" "=Z")
> ++ (match_operand:V16QI 1 "register_operand" "v"))
> ++ (unspec [(const_int 0)] UNSPEC_STVFLX)])]
> ++ "TARGET_ALTIVEC2"
> ++ "stvflx %1,%y0"
> ++ [(set_attr "type" "vecstore")])
> ++
> ++(define_insn "altivec_stvflxl"
> ++ [(parallel
> ++ [(set (match_operand:V16QI 0 "memory_operand" "=Z")
> ++ (match_operand:V16QI 1 "register_operand" "v"))
> ++ (unspec [(const_int 0)] UNSPEC_STVFLXL)])]
> ++ "TARGET_ALTIVEC2"
> ++ "stvflxl %1,%y0"
> ++ [(set_attr "type" "vecstore")])
> ++
> ++(define_insn "altivec_stvfrx"
> ++ [(parallel
> ++ [(set (match_operand:V16QI 0 "memory_operand" "=Z")
> ++ (match_operand:V16QI 1 "register_operand" "v"))
> ++ (unspec [(const_int 0)] UNSPEC_STVFRX)])]
> ++ "TARGET_ALTIVEC2"
> ++ "stvfrx %1,%y0"
> ++ [(set_attr "type" "vecstore")])
> ++
> ++(define_insn "altivec_stvfrxl"
> ++ [(parallel
> ++ [(set (match_operand:V16QI 0 "memory_operand" "=Z")
> ++ (match_operand:V16QI 1 "register_operand" "v"))
> ++ (unspec [(const_int 0)] UNSPEC_STVFRXL)])]
> ++ "TARGET_ALTIVEC2"
> ++ "stvfrxl %1,%y0"
> ++ [(set_attr "type" "vecstore")])
> ++
> ++(define_insn "altivec_lvswx"
> ++ [(set (match_operand:V16QI 0 "register_operand" "=v")
> ++ (unspec:V16QI [(match_operand 1 "memory_operand" "Z")]
> ++ UNSPEC_LVSWX))]
> ++ "TARGET_ALTIVEC2"
> ++ "lvswx %0,%y1"
> ++ [(set_attr "type" "vecload")])
> ++
> ++(define_insn "altivec_lvswxl"
> ++ [(set (match_operand:V16QI 0 "register_operand" "=v")
> ++ (unspec:V16QI [(match_operand 1 "memory_operand" "Z")]
> ++ UNSPEC_LVSWXL))]
> ++ "TARGET_ALTIVEC2"
> ++ "lvswxl %0,%y1"
> ++ [(set_attr "type" "vecload")])
> ++
> ++(define_insn "altivec_lvsm"
> ++ [(set (match_operand:V16QI 0 "register_operand" "=v")
> ++ (unspec:V16QI [(match_operand 1 "memory_operand" "Z")]
> ++ UNSPEC_LVSM))]
> ++ "TARGET_ALTIVEC2"
> ++ "lvsm %0,%y1"
> ++ [(set_attr "type" "vecload")])
> ++
> ++(define_insn "altivec_stvswx"
> ++ [(parallel
> ++ [(set (match_operand:V16QI 0 "memory_operand" "=Z")
> ++ (match_operand:V16QI 1 "register_operand" "v"))
> ++ (unspec [(const_int 0)] UNSPEC_STVSWX)])]
> ++ "TARGET_ALTIVEC2"
> ++ "stvswx %1,%y0"
> ++ [(set_attr "type" "vecstore")])
> ++
> ++(define_insn "altivec_stvswxl"
> ++ [(parallel
> ++ [(set (match_operand:V16QI 0 "memory_operand" "=Z")
> ++ (match_operand:V16QI 1 "register_operand" "v"))
> ++ (unspec [(const_int 0)] UNSPEC_STVSWXL)])]
> ++ "TARGET_ALTIVEC2"
> ++ "stvswxl %1,%y0"
> ++ [(set_attr "type" "vecstore")])
> ++
> + (define_expand "vec_extract_evenv4si"
> + [(set (match_operand:V4SI 0 "register_operand" "")
> + (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "")
> +diff -ruN gcc-4.6.2-orig/gcc/config/rs6000/e5500.md gcc-4.6.2/gcc/config/rs6000/e5500.md
> +--- gcc-4.6.2-orig/gcc/config/rs6000/e5500.md 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/config/rs6000/e5500.md 2012-03-06 12:16:07.590039001 -0600
> +@@ -0,0 +1,176 @@
> ++;; Pipeline description for Freescale PowerPC e5500 core.
> ++;; Copyright (C) 2011 Free Software Foundation, Inc.
> ++;; Contributed by Edmar Wienskoski (edmar at freescale.com)
> ++;;
> ++;; This file is part of GCC.
> ++;;
> ++;; GCC is free software; you can redistribute it and/or modify it
> ++;; under the terms of the GNU General Public License as published
> ++;; by the Free Software Foundation; either version 3, or (at your
> ++;; option) any later version.
> ++;;
> ++;; GCC is distributed in the hope that it will be useful, but WITHOUT
> ++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
> ++;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
> ++;; License for more details.
> ++;;
> ++;; You should have received a copy of the GNU General Public License
> ++;; along with GCC; see the file COPYING3. If not see
> ++;; <http://www.gnu.org/licenses/>.
> ++;;
> ++;; e5500 64-bit SFX(2), CFX, LSU, FPU, BU
> ++;; Max issue 3 insns/clock cycle (includes 1 branch)
> ++
> ++(define_automaton "e5500_most,e5500_long")
> ++(define_cpu_unit "e5500_decode_0,e5500_decode_1" "e5500_most")
> ++
> ++;; SFX.
> ++(define_cpu_unit "e5500_sfx_0,e5500_sfx_1" "e5500_most")
> ++
> ++;; CFX.
> ++(define_cpu_unit "e5500_cfx_stage0,e5500_cfx_stage1" "e5500_most")
> ++
> ++;; Non-pipelined division.
> ++(define_cpu_unit "e5500_cfx_div" "e5500_long")
> ++
> ++;; LSU.
> ++(define_cpu_unit "e5500_lsu" "e5500_most")
> ++
> ++;; FPU.
> ++(define_cpu_unit "e5500_fpu" "e5500_long")
> ++
> ++;; BU.
> ++(define_cpu_unit "e5500_bu" "e5500_most")
> ++
> ++;; The following units are used to make the automata deterministic.
> ++(define_cpu_unit "present_e5500_decode_0" "e5500_most")
> ++(define_cpu_unit "present_e5500_sfx_0" "e5500_most")
> ++(presence_set "present_e5500_decode_0" "e5500_decode_0")
> ++(presence_set "present_e5500_sfx_0" "e5500_sfx_0")
> ++
> ++;; Some useful abbreviations.
> ++(define_reservation "e5500_decode"
> ++ "e5500_decode_0|e5500_decode_1+present_e5500_decode_0")
> ++(define_reservation "e5500_sfx"
> ++ "e5500_sfx_0|e5500_sfx_1+present_e5500_sfx_0")
> ++
> ++;; SFX.
> ++(define_insn_reservation "e5500_sfx" 1
> ++ (and (eq_attr "type" "integer,insert_word,insert_dword,delayed_compare,\
> ++ shift,cntlz,exts")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_sfx")
> ++
> ++(define_insn_reservation "e5500_sfx2" 2
> ++ (and (eq_attr "type" "cmp,compare,fast_compare,trap")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_sfx")
> ++
> ++(define_insn_reservation "e5500_delayed" 2
> ++ (and (eq_attr "type" "var_shift_rotate,var_delayed_compare,popcnt")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_sfx*2")
> ++
> ++(define_insn_reservation "e5500_two" 2
> ++ (and (eq_attr "type" "two")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_decode+e5500_sfx,e5500_sfx")
> ++
> ++(define_insn_reservation "e5500_three" 3
> ++ (and (eq_attr "type" "three")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,(e5500_decode+e5500_sfx)*2,e5500_sfx")
> ++
> ++;; SFX - Mfcr.
> ++(define_insn_reservation "e5500_mfcr" 4
> ++ (and (eq_attr "type" "mfcr")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_sfx_0*4")
> ++
> ++;; SFX - Mtcrf.
> ++(define_insn_reservation "e5500_mtcrf" 1
> ++ (and (eq_attr "type" "mtcr")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_sfx_0")
> ++
> ++;; SFX - Mtjmpr.
> ++(define_insn_reservation "e5500_mtjmpr" 1
> ++ (and (eq_attr "type" "mtjmpr,mfjmpr")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_sfx")
> ++
> ++;; CFX - Multiply.
> ++(define_insn_reservation "e5500_multiply" 4
> ++ (and (eq_attr "type" "imul")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_cfx_stage0,e5500_cfx_stage1")
> ++
> ++(define_insn_reservation "e5500_multiply_i" 5
> ++ (and (eq_attr "type" "imul2,imul3,imul_compare")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_cfx_stage0,\
> ++ e5500_cfx_stage0+e5500_cfx_stage1,e5500_cfx_stage1")
> ++
> ++;; CFX - Divide.
> ++(define_insn_reservation "e5500_divide" 16
> ++ (and (eq_attr "type" "idiv")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_cfx_stage0+e5500_cfx_div,\
> ++ e5500_cfx_div*15")
> ++
> ++(define_insn_reservation "e5500_divide_d" 26
> ++ (and (eq_attr "type" "ldiv")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_cfx_stage0+e5500_cfx_div,\
> ++ e5500_cfx_div*25")
> ++
> ++;; LSU - Loads.
> ++(define_insn_reservation "e5500_load" 3
> ++ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
> ++ load_l,sync")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_lsu")
> ++
> ++(define_insn_reservation "e5500_fpload" 4
> ++ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_lsu")
> ++
> ++;; LSU - Stores.
> ++(define_insn_reservation "e5500_store" 3
> ++ (and (eq_attr "type" "store,store_ux,store_u,store_c")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_lsu")
> ++
> ++(define_insn_reservation "e5500_fpstore" 3
> ++ (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_lsu")
> ++
> ++;; FP.
> ++(define_insn_reservation "e5500_float" 7
> ++ (and (eq_attr "type" "fpsimple,fp,fpcompare,dmul")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_fpu")
> ++
> ++(define_insn_reservation "e5500_sdiv" 20
> ++ (and (eq_attr "type" "sdiv")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_fpu*20")
> ++
> ++(define_insn_reservation "e5500_ddiv" 35
> ++ (and (eq_attr "type" "ddiv")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_fpu*35")
> ++
> ++;; BU.
> ++(define_insn_reservation "e5500_branch" 1
> ++ (and (eq_attr "type" "jmpreg,branch,isync")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_bu")
> ++
> ++;; BU - CR logical.
> ++(define_insn_reservation "e5500_cr_logical" 1
> ++ (and (eq_attr "type" "cr_logical,delayed_cr")
> ++ (eq_attr "cpu" "ppce5500"))
> ++ "e5500_decode,e5500_bu")
> +diff -ruN gcc-4.6.2-orig/gcc/config/rs6000/e6500.md gcc-4.6.2/gcc/config/rs6000/e6500.md
> +--- gcc-4.6.2-orig/gcc/config/rs6000/e6500.md 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/config/rs6000/e6500.md 2012-03-06 12:16:25.573039002 -0600
> +@@ -0,0 +1,213 @@
> ++;; Pipeline description for Freescale PowerPC e6500 core.
> ++;; Copyright (C) 2011 Free Software Foundation, Inc.
> ++;; Contributed by Edmar Wienskoski (edmar at freescale.com)
> ++;;
> ++;; This file is part of GCC.
> ++;;
> ++;; GCC is free software; you can redistribute it and/or modify it
> ++;; under the terms of the GNU General Public License as published
> ++;; by the Free Software Foundation; either version 3, or (at your
> ++;; option) any later version.
> ++;;
> ++;; GCC is distributed in the hope that it will be useful, but WITHOUT
> ++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
> ++;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
> ++;; License for more details.
> ++;;
> ++;; You should have received a copy of the GNU General Public License
> ++;; along with GCC; see the file COPYING3. If not see
> ++;; <http://www.gnu.org/licenses/>.
> ++;;
> ++;; e6500 64-bit SFX(2), CFX, LSU, FPU, BU, VSFX, VCFX, VFPU, VPERM
> ++;; Max issue 3 insns/clock cycle (includes 1 branch)
> ++
> ++(define_automaton "e6500_most,e6500_long,e6500_vec")
> ++(define_cpu_unit "e6500_decode_0,e6500_decode_1" "e6500_most")
> ++
> ++;; SFX.
> ++(define_cpu_unit "e6500_sfx_0,e6500_sfx_1" "e6500_most")
> ++
> ++;; CFX.
> ++(define_cpu_unit "e6500_cfx_stage0,e6500_cfx_stage1" "e6500_most")
> ++
> ++;; Non-pipelined division.
> ++(define_cpu_unit "e6500_cfx_div" "e6500_long")
> ++
> ++;; LSU.
> ++(define_cpu_unit "e6500_lsu" "e6500_most")
> ++
> ++;; FPU.
> ++(define_cpu_unit "e6500_fpu" "e6500_long")
> ++
> ++;; BU.
> ++(define_cpu_unit "e6500_bu" "e6500_most")
> ++
> ++;; Altivec unit
> ++(define_cpu_unit "e6500_vec,e6500_vecperm" "e6500_vec")
> ++
> ++;; The following units are used to make the automata deterministic.
> ++(define_cpu_unit "present_e6500_decode_0" "e6500_most")
> ++(define_cpu_unit "present_e6500_sfx_0" "e6500_most")
> ++(presence_set "present_e6500_decode_0" "e6500_decode_0")
> ++(presence_set "present_e6500_sfx_0" "e6500_sfx_0")
> ++
> ++;; Some useful abbreviations.
> ++(define_reservation "e6500_decode"
> ++ "e6500_decode_0|e6500_decode_1+present_e6500_decode_0")
> ++(define_reservation "e6500_sfx"
> ++ "e6500_sfx_0|e6500_sfx_1+present_e6500_sfx_0")
> ++
> ++;; SFX.
> ++(define_insn_reservation "e6500_sfx" 1
> ++ (and (eq_attr "type" "integer,insert_word,insert_dword,delayed_compare,\
> ++ shift,cntlz,exts")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_sfx")
> ++
> ++(define_insn_reservation "e6500_sfx2" 2
> ++ (and (eq_attr "type" "cmp,compare,fast_compare,trap")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_sfx")
> ++
> ++(define_insn_reservation "e6500_delayed" 2
> ++ (and (eq_attr "type" "var_shift_rotate,var_delayed_compare,popcnt")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_sfx*2")
> ++
> ++(define_insn_reservation "e6500_two" 2
> ++ (and (eq_attr "type" "two")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_decode+e6500_sfx,e6500_sfx")
> ++
> ++(define_insn_reservation "e6500_three" 3
> ++ (and (eq_attr "type" "three")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,(e6500_decode+e6500_sfx)*2,e6500_sfx")
> ++
> ++;; SFX - Mfcr.
> ++(define_insn_reservation "e6500_mfcr" 4
> ++ (and (eq_attr "type" "mfcr")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_sfx_0*4")
> ++
> ++;; SFX - Mtcrf.
> ++(define_insn_reservation "e6500_mtcrf" 1
> ++ (and (eq_attr "type" "mtcr")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_sfx_0")
> ++
> ++;; SFX - Mtjmpr.
> ++(define_insn_reservation "e6500_mtjmpr" 1
> ++ (and (eq_attr "type" "mtjmpr,mfjmpr")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_sfx")
> ++
> ++;; CFX - Multiply.
> ++(define_insn_reservation "e6500_multiply" 4
> ++ (and (eq_attr "type" "imul")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_cfx_stage0,e6500_cfx_stage1")
> ++
> ++(define_insn_reservation "e6500_multiply_i" 5
> ++ (and (eq_attr "type" "imul2,imul3,imul_compare")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_cfx_stage0,\
> ++ e6500_cfx_stage0+e6500_cfx_stage1,e6500_cfx_stage1")
> ++
> ++;; CFX - Divide.
> ++(define_insn_reservation "e6500_divide" 16
> ++ (and (eq_attr "type" "idiv")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_cfx_stage0+e6500_cfx_div,\
> ++ e6500_cfx_div*15")
> ++
> ++(define_insn_reservation "e6500_divide_d" 26
> ++ (and (eq_attr "type" "ldiv")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_cfx_stage0+e6500_cfx_div,\
> ++ e6500_cfx_div*25")
> ++
> ++;; LSU - Loads.
> ++(define_insn_reservation "e6500_load" 3
> ++ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
> ++ load_l,sync")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_lsu")
> ++
> ++(define_insn_reservation "e6500_fpload" 4
> ++ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_lsu")
> ++
> ++(define_insn_reservation "e6500_vecload" 4
> ++ (and (eq_attr "type" "vecload")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_lsu")
> ++
> ++;; LSU - Stores.
> ++(define_insn_reservation "e6500_store" 3
> ++ (and (eq_attr "type" "store,store_ux,store_u,store_c")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_lsu")
> ++
> ++(define_insn_reservation "e6500_fpstore" 3
> ++ (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_lsu")
> ++
> ++(define_insn_reservation "e6500_vecstore" 4
> ++ (and (eq_attr "type" "vecstore")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_lsu")
> ++
> ++;; FP.
> ++(define_insn_reservation "e6500_float" 7
> ++ (and (eq_attr "type" "fpsimple,fp,fpcompare,dmul")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_fpu")
> ++
> ++(define_insn_reservation "e6500_sdiv" 20
> ++ (and (eq_attr "type" "sdiv")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_fpu*20")
> ++
> ++(define_insn_reservation "e6500_ddiv" 35
> ++ (and (eq_attr "type" "ddiv")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_fpu*35")
> ++
> ++;; BU.
> ++(define_insn_reservation "e6500_branch" 1
> ++ (and (eq_attr "type" "jmpreg,branch,isync")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_bu")
> ++
> ++;; BU - CR logical.
> ++(define_insn_reservation "e6500_cr_logical" 1
> ++ (and (eq_attr "type" "cr_logical,delayed_cr")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_bu")
> ++
> ++;; VSFX.
> ++(define_insn_reservation "e6500_vecsimple" 1
> ++ (and (eq_attr "type" "vecsimple,veccmp")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_vec")
> ++
> ++;; VCFX.
> ++(define_insn_reservation "e6500_veccomplex" 4
> ++ (and (eq_attr "type" "veccomplex")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_vec")
> ++
> ++;; VFPU.
> ++(define_insn_reservation "e6500_vecfloat" 6
> ++ (and (eq_attr "type" "vecfloat")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_vec")
> ++
> ++;; VPERM.
> ++(define_insn_reservation "e6500_vecperm" 2
> ++ (and (eq_attr "type" "vecperm")
> ++ (eq_attr "cpu" "ppce6500"))
> ++ "e6500_decode,e6500_vecperm")
> +diff -ruN gcc-4.6.2-orig/gcc/config/rs6000/rs6000-builtin.def gcc-4.6.2/gcc/config/rs6000/rs6000-builtin.def
> +--- gcc-4.6.2-orig/gcc/config/rs6000/rs6000-builtin.def 2011-02-21 15:38:21.000000000 -0600
> ++++ gcc-4.6.2/gcc/config/rs6000/rs6000-builtin.def 2012-03-06 12:37:40.248039025 -0600
> +@@ -224,6 +224,9 @@
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_LVEBX, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_LVEHX, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_LVEWX, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_LVEXBX, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_LVEXHX, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_LVEXWX, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_LVXL, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_LVX, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_STVX, RS6000_BTC_MEM)
> +@@ -231,14 +234,30 @@
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_LVLXL, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_LVRX, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_LVRXL, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_LVTLX, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_LVTLXL, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_LVTRX, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_LVTRXL, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_LVSWX, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_LVSWXL, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_LVSM, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_STVEBX, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_STVEHX, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_STVEWX, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_STVEXBX, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_STVEXHX, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_STVEXWX, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_STVXL, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_STVLX, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_STVLXL, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_STVRX, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_STVRXL, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_STVFLX, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_STVFLXL, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_STVFRX, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_STVFRXL, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_STVSWX, RS6000_BTC_MEM)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_STVSWXL, RS6000_BTC_MEM)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPBFP_P, RS6000_BTC_FP_PURE)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPEQFP_P, RS6000_BTC_FP_PURE)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPEQUB_P, RS6000_BTC_CONST)
> +@@ -275,6 +294,9 @@
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_EXT_V4SF, RS6000_BTC_CONST)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_COPYSIGN_V4SF, RS6000_BTC_CONST)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VRECIPFP, RS6000_BTC_FP_PURE)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_ABSDUB, RS6000_BTC_CONST)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_ABSDUH, RS6000_BTC_CONST)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_ABSDUW, RS6000_BTC_CONST)
> +
> + /* Altivec overloaded builtins. */
> + /* For now, don't set the classification for overloaded functions.
> +@@ -286,6 +308,7 @@
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGT_P, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VCMPGE_P, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_ABS, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_ABSD, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_ABSS, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_ADD, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_ADDC, RS6000_BTC_MISC)
> +@@ -321,10 +344,20 @@
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVEBX, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVEHX, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVEWX, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVEXBX, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVEXHX, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVEXWX, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVLX, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVLXL, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVRX, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVRXL, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVTLX, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVTLXL, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVTRX, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVTRXL, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVSWX, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVSWXL, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVSM, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVSL, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_LVSR, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_MADD, RS6000_BTC_MISC)
> +@@ -389,10 +422,19 @@
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVEBX, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVEHX, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVEWX, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVEXBX, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVEXHX, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVEXWX, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVLX, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVLXL, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVRX, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVRXL, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVFLX, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVFLXL, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVFRX, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVFRXL, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVSWX, RS6000_BTC_MISC)
> ++RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_STVSWXL, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SUB, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SUBC, RS6000_BTC_MISC)
> + RS6000_BUILTIN(ALTIVEC_BUILTIN_VEC_SUBS, RS6000_BTC_MISC)
> +diff -ruN gcc-4.6.2-orig/gcc/config/rs6000/rs6000.c gcc-4.6.2/gcc/config/rs6000/rs6000.c
> +--- gcc-4.6.2-orig/gcc/config/rs6000/rs6000.c 2011-09-18 17:01:56.000000000 -0500
> ++++ gcc-4.6.2/gcc/config/rs6000/rs6000.c 2012-03-06 12:44:04.689039002 -0600
> +@@ -779,6 +779,44 @@
> + 1, /* prefetch streams /*/
> + };
> +
> ++/* Instruction costs on PPCE5500 processors. */
> ++static const
> ++struct processor_costs ppce5500_cost = {
> ++ COSTS_N_INSNS (5), /* mulsi */
> ++ COSTS_N_INSNS (5), /* mulsi_const */
> ++ COSTS_N_INSNS (4), /* mulsi_const9 */
> ++ COSTS_N_INSNS (5), /* muldi */
> ++ COSTS_N_INSNS (14), /* divsi */
> ++ COSTS_N_INSNS (14), /* divdi */
> ++ COSTS_N_INSNS (7), /* fp */
> ++ COSTS_N_INSNS (10), /* dmul */
> ++ COSTS_N_INSNS (36), /* sdiv */
> ++ COSTS_N_INSNS (66), /* ddiv */
> ++ 64, /* cache line size */
> ++ 32, /* l1 cache */
> ++ 128, /* l2 cache */
> ++ 1, /* prefetch streams /*/
> ++};
> ++
> ++/* Instruction costs on PPCE6500 processors. */
> ++static const
> ++struct processor_costs ppce6500_cost = {
> ++ COSTS_N_INSNS (5), /* mulsi */
> ++ COSTS_N_INSNS (5), /* mulsi_const */
> ++ COSTS_N_INSNS (4), /* mulsi_const9 */
> ++ COSTS_N_INSNS (5), /* muldi */
> ++ COSTS_N_INSNS (14), /* divsi */
> ++ COSTS_N_INSNS (14), /* divdi */
> ++ COSTS_N_INSNS (7), /* fp */
> ++ COSTS_N_INSNS (10), /* dmul */
> ++ COSTS_N_INSNS (36), /* sdiv */
> ++ COSTS_N_INSNS (66), /* ddiv */
> ++ 64, /* cache line size */
> ++ 32, /* l1 cache */
> ++ 128, /* l2 cache */
> ++ 1, /* prefetch streams /*/
> ++};
> ++
> + /* Instruction costs on AppliedMicro Titan processors. */
> + static const
> + struct processor_costs titan_cost = {
> +@@ -1690,7 +1728,7 @@
> + | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
> + | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP
> + | MASK_POPCNTD | MASK_VSX | MASK_ISEL | MASK_NO_UPDATE
> +- | MASK_RECIP_PRECISION)
> ++ | MASK_RECIP_PRECISION | MASK_ALTIVEC2)
> + };
> +
> + /* Masks for instructions set at various powerpc ISAs. */
> +@@ -1785,6 +1823,12 @@
> + | MASK_ISEL},
> + {"e500mc64", PROCESSOR_PPCE500MC64, POWERPC_BASE_MASK | MASK_POWERPC64
> + | MASK_PPC_GFXOPT | MASK_ISEL},
> ++ {"e5500", PROCESSOR_PPCE5500, POWERPC_BASE_MASK | MASK_POWERPC64
> ++ | MASK_PPC_GFXOPT | MASK_ISEL | MASK_CMPB | MASK_POPCNTB
> ++ | MASK_POPCNTD},
> ++ {"e6500", PROCESSOR_PPCE6500, POWERPC_7400_MASK | MASK_POWERPC64
> ++ | MASK_MFCRF | MASK_ISEL | MASK_CMPB | MASK_POPCNTB | MASK_POPCNTD
> ++ | MASK_ALTIVEC2},
> + {"860", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
> + {"970", PROCESSOR_POWER4,
> + POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
> +@@ -2742,13 +2786,19 @@
> + : PROCESSOR_DEFAULT));
> +
> + if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
> +- || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64)
> ++ || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
> ++ || rs6000_cpu == PROCESSOR_PPCE5500)
> + {
> + if (TARGET_ALTIVEC)
> + error ("AltiVec not supported in this target");
> + if (TARGET_SPE)
> + error ("SPE not supported in this target");
> + }
> ++ if (rs6000_cpu == PROCESSOR_PPCE6500)
> ++ {
> ++ if (TARGET_SPE)
> ++ error ("SPE not supported in this target");
> ++ }
> +
> + /* Disable Cell microcode if we are optimizing for the Cell
> + and not optimizing for size. */
> +@@ -2843,9 +2893,16 @@
> + user's opinion, though. */
> + if (rs6000_block_move_inline_limit == 0
> + && (rs6000_cpu == PROCESSOR_PPCE500MC
> +- || rs6000_cpu == PROCESSOR_PPCE500MC64))
> ++ || rs6000_cpu == PROCESSOR_PPCE500MC64
> ++ || rs6000_cpu == PROCESSOR_PPCE5500
> ++ || rs6000_cpu == PROCESSOR_PPCE6500))
> + rs6000_block_move_inline_limit = 128;
> +
> ++ /* These machines do not have an fsqrt instruction.  */
> ++ if (rs6000_cpu == PROCESSOR_PPCE5500
> ++ || rs6000_cpu == PROCESSOR_PPCE6500)
> ++ target_flags &= ~MASK_PPC_GPOPT;
> ++
> + /* store_one_arg depends on expand_block_move to handle at least the
> + size of reg_parm_stack_space. */
> + if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
> +@@ -2977,7 +3034,9 @@
> + #endif
> +
> + if (TARGET_E500 || rs6000_cpu == PROCESSOR_PPCE500MC
> +- || rs6000_cpu == PROCESSOR_PPCE500MC64)
> ++ || rs6000_cpu == PROCESSOR_PPCE500MC64
> ++ || rs6000_cpu == PROCESSOR_PPCE5500
> ++ || rs6000_cpu == PROCESSOR_PPCE6500)
> + {
> + /* The e500 and e500mc do not have string instructions, and we set
> + MASK_STRING above when optimizing for size. */
> +@@ -3024,7 +3083,9 @@
> + || rs6000_cpu == PROCESSOR_POWER6
> + || rs6000_cpu == PROCESSOR_POWER7
> + || rs6000_cpu == PROCESSOR_PPCE500MC
> +- || rs6000_cpu == PROCESSOR_PPCE500MC64);
> ++ || rs6000_cpu == PROCESSOR_PPCE500MC64
> ++ || rs6000_cpu == PROCESSOR_PPCE5500
> ++ || rs6000_cpu == PROCESSOR_PPCE6500);
> +
> + /* Allow debug switches to override the above settings. These are set to -1
> + in rs6000.opt to indicate the user hasn't directly set the switch. */
> +@@ -3246,6 +3307,14 @@
> + rs6000_cost = &ppce500mc64_cost;
> + break;
> +
> ++ case PROCESSOR_PPCE5500:
> ++ rs6000_cost = &ppce5500_cost;
> ++ break;
> ++
> ++ case PROCESSOR_PPCE6500:
> ++ rs6000_cost = &ppce6500_cost;
> ++ break;
> ++
> + case PROCESSOR_TITAN:
> + rs6000_cost = &titan_cost;
> + break;
> +@@ -10212,6 +10281,9 @@
> + { MASK_ALTIVEC, CODE_FOR_addv8hi3, "__builtin_altivec_vadduhm", ALTIVEC_BUILTIN_VADDUHM },
> + { MASK_ALTIVEC, CODE_FOR_addv4si3, "__builtin_altivec_vadduwm", ALTIVEC_BUILTIN_VADDUWM },
> + { MASK_ALTIVEC, CODE_FOR_addv4sf3, "__builtin_altivec_vaddfp", ALTIVEC_BUILTIN_VADDFP },
> ++ { MASK_ALTIVEC2, CODE_FOR_altivec_vabsdub, "__builtin_altivec_vabsdub", ALTIVEC_BUILTIN_ABSDUB },
> ++ { MASK_ALTIVEC2, CODE_FOR_altivec_vabsduh, "__builtin_altivec_vabsduh", ALTIVEC_BUILTIN_ABSDUH },
> ++ { MASK_ALTIVEC2, CODE_FOR_altivec_vabsduw, "__builtin_altivec_vabsduw", ALTIVEC_BUILTIN_ABSDUW },
> + { MASK_ALTIVEC, CODE_FOR_altivec_vaddcuw, "__builtin_altivec_vaddcuw", ALTIVEC_BUILTIN_VADDCUW },
> + { MASK_ALTIVEC, CODE_FOR_altivec_vaddubs, "__builtin_altivec_vaddubs", ALTIVEC_BUILTIN_VADDUBS },
> + { MASK_ALTIVEC, CODE_FOR_altivec_vaddsbs, "__builtin_altivec_vaddsbs", ALTIVEC_BUILTIN_VADDSBS },
> +@@ -10372,6 +10444,7 @@
> + { MASK_VSX, CODE_FOR_vec_interleave_highv2df, "__builtin_vsx_mergeh_2df", VSX_BUILTIN_VEC_MERGEH_V2DF },
> + { MASK_VSX, CODE_FOR_vec_interleave_highv2di, "__builtin_vsx_mergeh_2di", VSX_BUILTIN_VEC_MERGEH_V2DI },
> +
> ++ { MASK_ALTIVEC2, CODE_FOR_nothing, "__builtin_vec_absd", ALTIVEC_BUILTIN_VEC_ABSD },
> + { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_add", ALTIVEC_BUILTIN_VEC_ADD },
> + { MASK_ALTIVEC|MASK_VSX, CODE_FOR_nothing, "__builtin_vec_vaddfp", ALTIVEC_BUILTIN_VEC_VADDFP },
> + { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduwm", ALTIVEC_BUILTIN_VEC_VADDUWM },
> +@@ -11803,6 +11876,12 @@
> + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
> + case ALTIVEC_BUILTIN_STVEWX:
> + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
> ++ case ALTIVEC_BUILTIN_STVEXBX:
> ++ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvexbx, exp);
> ++ case ALTIVEC_BUILTIN_STVEXHX:
> ++ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvexhx, exp);
> ++ case ALTIVEC_BUILTIN_STVEXWX:
> ++ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvexwx, exp);
> + case ALTIVEC_BUILTIN_STVXL:
> + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);
> +
> +@@ -11814,6 +11893,18 @@
> + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
> + case ALTIVEC_BUILTIN_STVRXL:
> + return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
> ++ case ALTIVEC_BUILTIN_STVFLX:
> ++ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvflx, exp);
> ++ case ALTIVEC_BUILTIN_STVFLXL:
> ++ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvflxl, exp);
> ++ case ALTIVEC_BUILTIN_STVFRX:
> ++ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvfrx, exp);
> ++ case ALTIVEC_BUILTIN_STVFRXL:
> ++ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvfrxl, exp);
> ++ case ALTIVEC_BUILTIN_STVSWX:
> ++ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvswx, exp);
> ++ case ALTIVEC_BUILTIN_STVSWXL:
> ++ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvswxl, exp);
> +
> + case VSX_BUILTIN_STXVD2X_V2DF:
> + return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
> +@@ -11948,6 +12039,15 @@
> + case ALTIVEC_BUILTIN_LVEWX:
> + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
> + exp, target, false);
> ++ case ALTIVEC_BUILTIN_LVEXBX:
> ++ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvexbx,
> ++ exp, target, false);
> ++ case ALTIVEC_BUILTIN_LVEXHX:
> ++ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvexhx,
> ++ exp, target, false);
> ++ case ALTIVEC_BUILTIN_LVEXWX:
> ++ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvexwx,
> ++ exp, target, false);
> + case ALTIVEC_BUILTIN_LVXL:
> + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
> + exp, target, false);
> +@@ -11966,6 +12066,27 @@
> + case ALTIVEC_BUILTIN_LVRXL:
> + return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
> + exp, target, true);
> ++ case ALTIVEC_BUILTIN_LVTLX:
> ++ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvtlx,
> ++ exp, target, true);
> ++ case ALTIVEC_BUILTIN_LVTLXL:
> ++ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvtlxl,
> ++ exp, target, true);
> ++ case ALTIVEC_BUILTIN_LVTRX:
> ++ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvtrx,
> ++ exp, target, true);
> ++ case ALTIVEC_BUILTIN_LVTRXL:
> ++ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvtrxl,
> ++ exp, target, true);
> ++ case ALTIVEC_BUILTIN_LVSWX:
> ++ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvswx,
> ++ exp, target, true);
> ++ case ALTIVEC_BUILTIN_LVSWXL:
> ++ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvswxl,
> ++ exp, target, true);
> ++ case ALTIVEC_BUILTIN_LVSM:
> ++ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsm,
> ++ exp, target, true);
> + case VSX_BUILTIN_LXVD2X_V2DF:
> + return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
> + exp, target, false);
> +@@ -13278,6 +13399,9 @@
> + def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
> + def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
> + def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_lvexbx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEXBX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_lvexhx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEXHX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_lvexwx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEXWX);
> + def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
> + def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
> + def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
> +@@ -13285,6 +13409,9 @@
> + def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
> + def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
> + def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_stvexbx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEXBX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_stvexhx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEXHX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_stvexwx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEXWX);
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
> +@@ -13293,12 +13420,18 @@
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_lvexbx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEXBX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_lvexhx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEXHX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_lvexwx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEXWX);
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_stvexwx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEXWX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_stvexbx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEXBX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_stvexhx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEXHX);
> +
> + def_builtin (MASK_VSX, "__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
> + VSX_BUILTIN_LXVD2X_V2DF);
> +@@ -13351,6 +13484,33 @@
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
> + }
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_lvtlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVTLX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_lvtlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVTLXL);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_lvtrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVTRX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_lvtrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVTRXL);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_lvtlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVTLX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_lvtlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVTLXL);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_lvtrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVTRX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_lvtrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVTRXL);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_stvflx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVFLX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_stvflxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVFLXL);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_stvfrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVFRX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_stvfrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVFRXL);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_stvflx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVFLX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_stvflxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVFLXL);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_stvfrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVFRX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_stvfrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVFRXL);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_lvswx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSWX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_lvswxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSWXL);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_lvswx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSWX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_lvswxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSWXL);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_lvsm", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSM);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_lvsm", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSM);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_stvswx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVSWX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_altivec_stvswxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVSWXL);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_stvswx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVSWX);
> ++ def_builtin (MASK_ALTIVEC2, "__builtin_vec_stvswxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVSWXL);
> ++
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
> + def_builtin (MASK_ALTIVEC, "__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
> +@@ -13668,6 +13828,9 @@
> + case ALTIVEC_BUILTIN_VMULEUH_UNS:
> + case ALTIVEC_BUILTIN_VMULOUB_UNS:
> + case ALTIVEC_BUILTIN_VMULOUH_UNS:
> ++ case ALTIVEC_BUILTIN_ABSDUB:
> ++ case ALTIVEC_BUILTIN_ABSDUH:
> ++ case ALTIVEC_BUILTIN_ABSDUW:
> + h.uns_p[0] = 1;
> + h.uns_p[1] = 1;
> + h.uns_p[2] = 1;
> +@@ -23250,6 +23413,7 @@
> + || rs6000_cpu_attr == CPU_PPC750
> + || rs6000_cpu_attr == CPU_PPC7400
> + || rs6000_cpu_attr == CPU_PPC7450
> ++ || rs6000_cpu_attr == CPU_PPCE5500
> + || rs6000_cpu_attr == CPU_POWER4
> + || rs6000_cpu_attr == CPU_POWER5
> + || rs6000_cpu_attr == CPU_POWER7
> +@@ -23794,6 +23958,8 @@
> + case CPU_PPCE300C3:
> + case CPU_PPCE500MC:
> + case CPU_PPCE500MC64:
> ++ case CPU_PPCE5500:
> ++ case CPU_PPCE6500:
> + case CPU_TITAN:
> + return 2;
> + case CPU_RIOS2:
> +diff -ruN gcc-4.6.2-orig/gcc/config/rs6000/rs6000-c.c gcc-4.6.2/gcc/config/rs6000/rs6000-c.c
> +--- gcc-4.6.2-orig/gcc/config/rs6000/rs6000-c.c 2011-02-02 23:42:19.000000000 -0600
> ++++ gcc-4.6.2/gcc/config/rs6000/rs6000-c.c 2012-03-06 12:54:55.964038969 -0600
> +@@ -310,6 +310,8 @@
> + /* Enable context-sensitive macros. */
> + cpp_get_callbacks (pfile)->macro_to_expand = rs6000_macro_to_expand;
> + }
> ++ if (TARGET_ALTIVEC2)
> ++ builtin_define ("__ALTIVEC2__");
> + }
> + if (rs6000_cpu == PROCESSOR_CELL)
> + builtin_define ("__PPU__");
> +@@ -569,6 +571,24 @@
> + RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V16QI, 0, 0 },
> +
> + /* Binary AltiVec/VSX builtins. */
> ++ { ALTIVEC_BUILTIN_VEC_ABSD, ALTIVEC_BUILTIN_ABSDUB,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_ABSD, ALTIVEC_BUILTIN_ABSDUB,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_ABSD, ALTIVEC_BUILTIN_ABSDUB,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_ABSD, ALTIVEC_BUILTIN_ABSDUH,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_ABSD, ALTIVEC_BUILTIN_ABSDUH,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_ABSD, ALTIVEC_BUILTIN_ABSDUH,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_ABSD, ALTIVEC_BUILTIN_ABSDUW,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_ABSD, ALTIVEC_BUILTIN_ABSDUW,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_ABSD, ALTIVEC_BUILTIN_ABSDUW,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
> + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
> + RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
> + { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
> +@@ -1084,6 +1104,24 @@
> + RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
> + { ALTIVEC_BUILTIN_VEC_LVEBX, ALTIVEC_BUILTIN_LVEBX,
> + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVEXWX, ALTIVEC_BUILTIN_LVEXWX,
> ++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVEXWX, ALTIVEC_BUILTIN_LVEXWX,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVEXWX, ALTIVEC_BUILTIN_LVEXWX,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVEXWX, ALTIVEC_BUILTIN_LVEXWX,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVEXWX, ALTIVEC_BUILTIN_LVEXWX,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVEXHX, ALTIVEC_BUILTIN_LVEXHX,
> ++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVEXHX, ALTIVEC_BUILTIN_LVEXHX,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVEXBX, ALTIVEC_BUILTIN_LVEXBX,
> ++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVEXBX, ALTIVEC_BUILTIN_LVEXBX,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
> + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
> + RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
> + { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
> +@@ -1336,6 +1374,258 @@
> + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
> + { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
> + RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLX, ALTIVEC_BUILTIN_LVTLX,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTLXL, ALTIVEC_BUILTIN_LVTLXL,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRX, ALTIVEC_BUILTIN_LVTRX,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVTRXL, ALTIVEC_BUILTIN_LVTRXL,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWX, ALTIVEC_BUILTIN_LVSWX,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSWXL, ALTIVEC_BUILTIN_LVSWXL,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
> ++ { ALTIVEC_BUILTIN_VEC_LVSM, ALTIVEC_BUILTIN_LVSM,
> ++ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
> + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB,
> + RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
> + { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB,
> +@@ -2812,6 +3102,46 @@
> + RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
> + { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
> + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXWX, ALTIVEC_BUILTIN_STVEXWX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXWX, ALTIVEC_BUILTIN_STVEXWX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXWX, ALTIVEC_BUILTIN_STVEXWX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXWX, ALTIVEC_BUILTIN_STVEXWX,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXWX, ALTIVEC_BUILTIN_STVEXWX,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXWX, ALTIVEC_BUILTIN_STVEXWX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_void },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXWX, ALTIVEC_BUILTIN_STVEXWX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXWX, ALTIVEC_BUILTIN_STVEXWX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXHX, ALTIVEC_BUILTIN_STVEXHX,
> ++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXHX, ALTIVEC_BUILTIN_STVEXHX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXHX, ALTIVEC_BUILTIN_STVEXHX,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXHX, ALTIVEC_BUILTIN_STVEXHX,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXHX, ALTIVEC_BUILTIN_STVEXHX,
> ++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXHX, ALTIVEC_BUILTIN_STVEXHX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXBX, ALTIVEC_BUILTIN_STVEXBX,
> ++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXBX, ALTIVEC_BUILTIN_STVEXBX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXBX, ALTIVEC_BUILTIN_STVEXBX,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXBX, ALTIVEC_BUILTIN_STVEXBX,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXBX, ALTIVEC_BUILTIN_STVEXBX,
> ++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
> ++ { ALTIVEC_BUILTIN_VEC_STVEXBX, ALTIVEC_BUILTIN_STVEXBX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
> + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
> + RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
> + { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
> +@@ -3016,6 +3346,222 @@
> + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
> + { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
> + RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLX, ALTIVEC_BUILTIN_STVFLX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFLXL, ALTIVEC_BUILTIN_STVFLXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRX, ALTIVEC_BUILTIN_STVFRX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVFRXL, ALTIVEC_BUILTIN_STVFRXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWX, ALTIVEC_BUILTIN_STVSWX,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
> ++ { ALTIVEC_BUILTIN_VEC_STVSWXL, ALTIVEC_BUILTIN_STVSWXL,
> ++ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
> + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_16QI,
> + RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_NOT_OPAQUE },
> + { VSX_BUILTIN_VEC_XXSLDWI, VSX_BUILTIN_XXSLDWI_16QI,
> +diff -ruN gcc-4.6.2-orig/gcc/config/rs6000/rs6000.h gcc-4.6.2/gcc/config/rs6000/rs6000.h
> +--- gcc-4.6.2-orig/gcc/config/rs6000/rs6000.h 2011-07-27 13:17:15.000000000 -0500
> ++++ gcc-4.6.2/gcc/config/rs6000/rs6000.h 2012-03-06 12:16:25.582039002 -0600
> +@@ -168,6 +168,8 @@
> + %{mcpu=e300c3: -me300} \
> + %{mcpu=e500mc: -me500mc} \
> + %{mcpu=e500mc64: -me500mc64} \
> ++%{mcpu=e5500: -me5500} \
> ++%{mcpu=e6500: -me6500} \
> + %{maltivec: -maltivec} \
> + %{mvsx: -mvsx %{!maltivec: -maltivec} %{!mcpu*: %(asm_cpu_power7)}} \
> + -many"
> +@@ -477,13 +479,15 @@
> +
> + #define TARGET_FCTIDZ TARGET_FCFID
> + #define TARGET_STFIWX TARGET_PPC_GFXOPT
> +-#define TARGET_LFIWAX TARGET_CMPB
> +-#define TARGET_LFIWZX TARGET_POPCNTD
> +-#define TARGET_FCFIDS TARGET_POPCNTD
> +-#define TARGET_FCFIDU TARGET_POPCNTD
> +-#define TARGET_FCFIDUS TARGET_POPCNTD
> +-#define TARGET_FCTIDUZ TARGET_POPCNTD
> +-#define TARGET_FCTIWUZ TARGET_POPCNTD
> ++#define TARGET_LFIWAX (TARGET_CMPB && rs6000_cpu != PROCESSOR_PPCE5500 \
> ++ && rs6000_cpu != PROCESSOR_PPCE6500)
> ++#define TARGET_LFIWZX (TARGET_POPCNTD && rs6000_cpu != PROCESSOR_PPCE5500 \
> ++ && rs6000_cpu != PROCESSOR_PPCE6500)
> ++#define TARGET_FCFIDS TARGET_LFIWZX
> ++#define TARGET_FCFIDU TARGET_LFIWZX
> ++#define TARGET_FCFIDUS TARGET_LFIWZX
> ++#define TARGET_FCTIDUZ TARGET_LFIWZX
> ++#define TARGET_FCTIWUZ TARGET_LFIWZX
> +
> + /* E500 processors only support plain "sync", not lwsync. */
> + #define TARGET_NO_LWSYNC TARGET_E500
> +@@ -494,10 +498,14 @@
> +
> + #define TARGET_FRE (TARGET_HARD_FLOAT && TARGET_FPRS \
> + && TARGET_DOUBLE_FLOAT \
> +- && (TARGET_POPCNTB || VECTOR_UNIT_VSX_P (DFmode)))
> ++ && (TARGET_POPCNTB || VECTOR_UNIT_VSX_P (DFmode)) \
> ++ && rs6000_cpu != PROCESSOR_PPCE5500 \
> ++ && rs6000_cpu != PROCESSOR_PPCE6500)
> +
> + #define TARGET_FRSQRTES (TARGET_HARD_FLOAT && TARGET_POPCNTB \
> +- && TARGET_FPRS && TARGET_SINGLE_FLOAT)
> ++ && TARGET_FPRS && TARGET_SINGLE_FLOAT \
> ++ && rs6000_cpu != PROCESSOR_PPCE5500 \
> ++ && rs6000_cpu != PROCESSOR_PPCE6500)
> +
> + #define TARGET_FRSQRTE (TARGET_HARD_FLOAT && TARGET_FPRS \
> + && TARGET_DOUBLE_FLOAT \
> +diff -ruN gcc-4.6.2-orig/gcc/config/rs6000/rs6000.md gcc-4.6.2/gcc/config/rs6000/rs6000.md
> +--- gcc-4.6.2-orig/gcc/config/rs6000/rs6000.md 2011-09-19 11:41:20.000000000 -0500
> ++++ gcc-4.6.2/gcc/config/rs6000/rs6000.md 2012-03-06 12:16:25.584039002 -0600
> +@@ -126,7 +126,7 @@
> +
> + ;; Define an insn type attribute. This is used in function unit delay
> + ;; computations.
> +-(define_attr "type" "integer,two,three,load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,store,store_ux,store_u,fpload,fpload_ux,fpload_u,fpstore,fpstore_ux,fpstore_u,vecload,vecstore,imul,imul2,imul3,lmul,idiv,ldiv,insert_word,branch,cmp,fast_compare,compare,var_delayed_compare,delayed_compare,imul_compare,lmul_compare,fpcompare,cr_logical,delayed_cr,mfcr,mfcrf,mtcr,mfjmpr,mtjmpr,fp,fpsimple,dmul,sdiv,ddiv,ssqrt,dsqrt,jmpreg,brinc,vecsimple,veccomplex,vecdiv,veccmp,veccmpsimple,vecperm,vecfloat,vecfdiv,isync,sync,load_l,store_c,shift,trap,insert_dword,var_shift_rotate,cntlz,exts,mffgpr,mftgpr,isel"
> ++(define_attr "type" "integer,two,three,load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,store,store_ux,store_u,fpload,fpload_ux,fpload_u,fpstore,fpstore_ux,fpstore_u,vecload,vecstore,imul,imul2,imul3,lmul,idiv,ldiv,insert_word,branch,cmp,fast_compare,compare,var_delayed_compare,delayed_compare,imul_compare,lmul_compare,fpcompare,cr_logical,delayed_cr,mfcr,mfcrf,mtcr,mfjmpr,mtjmpr,fp,fpsimple,dmul,sdiv,ddiv,ssqrt,dsqrt,jmpreg,brinc,vecsimple,veccomplex,vecdiv,veccmp,veccmpsimple,vecperm,vecfloat,vecfdiv,isync,sync,load_l,store_c,shift,trap,insert_dword,var_shift_rotate,cntlz,exts,mffgpr,mftgpr,isel,popcnt"
> + (const_string "integer"))
> +
> + ;; Define floating point instruction sub-types for use with Xfpu.md
> +@@ -148,7 +148,7 @@
> + ;; Processor type -- this attribute must exactly match the processor_type
> + ;; enumeration in rs6000.h.
> +
> +-(define_attr "cpu" "rios1,rios2,rs64a,mpccore,ppc403,ppc405,ppc440,ppc476,ppc601,ppc603,ppc604,ppc604e,ppc620,ppc630,ppc750,ppc7400,ppc7450,ppc8540,ppce300c2,ppce300c3,ppce500mc,ppce500mc64,power4,power5,power6,power7,cell,ppca2,titan"
> ++(define_attr "cpu" "rios1,rios2,rs64a,mpccore,ppc403,ppc405,ppc440,ppc476,ppc601,ppc603,ppc604,ppc604e,ppc620,ppc630,ppc750,ppc7400,ppc7450,ppc8540,ppce300c2,ppce300c3,ppce500mc,ppce500mc64,ppce5500,ppce6500,power4,power5,power6,power7,cell,ppca2,titan"
> + (const (symbol_ref "rs6000_cpu_attr")))
> +
> +
> +@@ -176,6 +176,8 @@
> + (include "e300c2c3.md")
> + (include "e500mc.md")
> + (include "e500mc64.md")
> ++(include "e5500.md")
> ++(include "e6500.md")
> + (include "power4.md")
> + (include "power5.md")
> + (include "power6.md")
> +@@ -2302,13 +2304,17 @@
> + (unspec:GPR [(match_operand:GPR 1 "gpc_reg_operand" "r")]
> + UNSPEC_POPCNTB))]
> + "TARGET_POPCNTB"
> +- "popcntb %0,%1")
> ++ "popcntb %0,%1"
> ++ [(set_attr "length" "4")
> ++ (set_attr "type" "popcnt")])
> +
> + (define_insn "popcntd<mode>2"
> + [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
> + (popcount:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
> + "TARGET_POPCNTD"
> +- "popcnt<wd> %0,%1")
> ++ "popcnt<wd> %0,%1"
> ++ [(set_attr "length" "4")
> ++ (set_attr "type" "popcnt")])
> +
> + (define_expand "popcount<mode>2"
> + [(set (match_operand:GPR 0 "gpc_reg_operand" "")
> +@@ -5957,10 +5963,10 @@
> + && ((TARGET_PPC_GFXOPT
> + && !HONOR_NANS (<MODE>mode)
> + && !HONOR_SIGNED_ZEROS (<MODE>mode))
> +- || TARGET_CMPB
> ++ || TARGET_LFIWAX
> + || VECTOR_UNIT_VSX_P (<MODE>mode))"
> + {
> +- if (TARGET_CMPB || VECTOR_UNIT_VSX_P (<MODE>mode))
> ++ if (TARGET_LFIWAX || VECTOR_UNIT_VSX_P (<MODE>mode))
> + {
> + emit_insn (gen_copysign<mode>3_fcpsgn (operands[0], operands[1],
> + operands[2]));
> +@@ -5979,7 +5985,7 @@
> + (unspec:SFDF [(match_operand:SFDF 1 "gpc_reg_operand" "<rreg2>")
> + (match_operand:SFDF 2 "gpc_reg_operand" "<rreg2>")]
> + UNSPEC_COPYSIGN))]
> +- "TARGET_CMPB && !VECTOR_UNIT_VSX_P (<MODE>mode)"
> ++ "TARGET_LFIWAX && !VECTOR_UNIT_VSX_P (<MODE>mode)"
> + "fcpsgn %0,%2,%1"
> + [(set_attr "type" "fp")])
> +
> +diff -ruN gcc-4.6.2-orig/gcc/config/rs6000/rs6000.opt gcc-4.6.2/gcc/config/rs6000/rs6000.opt
> +--- gcc-4.6.2-orig/gcc/config/rs6000/rs6000.opt 2010-11-29 19:47:54.000000000 -0600
> ++++ gcc-4.6.2/gcc/config/rs6000/rs6000.opt 2012-03-06 12:16:25.584039002 -0600
> +@@ -179,6 +179,10 @@
> + Target Report Mask(ALTIVEC) Save
> + Use AltiVec instructions
> +
> ++maltivec2
> ++Target Report Mask(ALTIVEC2) Save
> ++Use AltiVec PowerPC V2.07 instructions
> ++
> + mhard-dfp
> + Target Report Mask(DFP) Save
> + Use decimal floating point instructions
> +diff -ruN gcc-4.6.2-orig/gcc/config/rs6000/rs6000-opts.h gcc-4.6.2/gcc/config/rs6000/rs6000-opts.h
> +--- gcc-4.6.2-orig/gcc/config/rs6000/rs6000-opts.h 2010-11-19 11:27:18.000000000 -0600
> ++++ gcc-4.6.2/gcc/config/rs6000/rs6000-opts.h 2012-03-06 12:16:25.584039002 -0600
> +@@ -53,6 +53,8 @@
> + PROCESSOR_PPCE300C3,
> + PROCESSOR_PPCE500MC,
> + PROCESSOR_PPCE500MC64,
> ++ PROCESSOR_PPCE5500,
> ++ PROCESSOR_PPCE6500,
> + PROCESSOR_POWER4,
> + PROCESSOR_POWER5,
> + PROCESSOR_POWER6,
> +diff -ruN gcc-4.6.2-orig/gcc/config.gcc gcc-4.6.2/gcc/config.gcc
> +--- gcc-4.6.2-orig/gcc/config.gcc 2011-07-22 11:44:50.000000000 -0500
> ++++ gcc-4.6.2/gcc/config.gcc 2012-03-06 12:16:25.585039002 -0600
> +@@ -396,7 +396,7 @@
> + extra_headers="ppc-asm.h altivec.h spe.h ppu_intrinsics.h paired.h spu2vmx.h vec_types.h si2vmx.h"
> + need_64bit_hwint=yes
> + case x$with_cpu in
> +- xpowerpc64|xdefault64|x6[23]0|x970|xG5|xpower[34567]|xpower6x|xrs64a|xcell|xa2|xe500mc64)
> +- xpowerpc64|xdefault64|x6[23]0|x970|xG5|xpower[34567]|xpower6x|xrs64a|xcell|xa2|xe500mc64|xe5500|xe6500)
> + cpu_is_64bit=yes
> + ;;
> + esac
> +@@ -3501,8 +3501,8 @@
> + | 401 | 403 | 405 | 405fp | 440 | 440fp | 464 | 464fp \
> + | 476 | 476fp | 505 | 601 | 602 | 603 | 603e | ec603e \
> + | 604 | 604e | 620 | 630 | 740 | 750 | 7400 | 7450 \
> +- | a2 | e300c[23] | 854[08] | e500mc | e500mc64 | titan\
> +- | 801 | 821 | 823 | 860 | 970 | G3 | G4 | G5 | cell)
> ++ | a2 | e300c[23] | 854[08] | e500mc | e500mc64 | e5500 | e6500 \
> ++ | titan | 801 | 821 | 823 | 860 | 970 | G3 | G4 | G5 | cell)
> + # OK
> + ;;
> + *)
> +diff -ruN gcc-4.6.2-orig/gcc/doc/extend.texi gcc-4.6.2/gcc/doc/extend.texi
> +--- gcc-4.6.2-orig/gcc/doc/extend.texi 2011-10-24 09:55:45.000000000 -0500
> ++++ gcc-4.6.2/gcc/doc/extend.texi 2012-03-06 12:56:49.399039002 -0600
> +@@ -12509,6 +12509,291 @@
> + @samp{vec_vsx_st} builtins will always generate the VSX @samp{LXVD2X},
> + @samp{LXVW4X}, @samp{STXVD2X}, and @samp{STXVW4X} instructions.
> +
> ++Using @option{-maltivec2} will extend the Altivec interface with the
> ++following additional functions:
> ++
> ++@smallexample
> ++vector unsigned char vec_absd (vector unsigned char, vector unsigned char);
> ++vector unsigned char vec_absd (vector bool char, vector unsigned char);
> ++vector unsigned char vec_absd (vector unsigned char, vector bool char);
> ++vector unsigned short vec_absd (vector unsigned short, vector unsigned short);
> ++vector unsigned short vec_absd (vector bool short, vector unsigned short);
> ++vector unsigned short vec_absd (vector unsigned short, vector bool short);
> ++vector unsigned int vec_absd (vector unsigned int, vector unsigned int);
> ++vector unsigned int vec_absd (vector bool int, vector unsigned int);
> ++vector unsigned int vec_absd (vector unsigned int, vector bool int);
> ++
> ++vector signed char vec_lvexbx (long, signed char *);
> ++vector unsigned char vec_lvexbx (long, unsigned char *);
> ++vector signed short vec_lvexhx (long, signed short *);
> ++vector unsigned short vec_lvexhx (long, unsigned short *);
> ++vector float vec_lvexwx (long, float *);
> ++vector signed int vec_lvexwx (long, signed int *);
> ++vector unsigned int vec_lvexwx (long, unsigned int *);
> ++vector signed int vec_lvexwx (long, signed long *);
> ++vector unsigned int vec_lvexwx (long, unsigned long *);
> ++
> ++void vec_stvexbx (vector signed char, long, signed char *);
> ++void vec_stvexbx (vector unsigned char, long, unsigned char *);
> ++void vec_stvexbx (vector bool char, long, signed char *);
> ++void vec_stvexbx (vector bool char, long, unsigned char *);
> ++void vec_stvexbx (vector signed char, long, void *);
> ++void vec_stvexbx (vector unsigned char, long, void *);
> ++void vec_stvexhx (vector signed short, long, signed short *);
> ++void vec_stvexhx (vector unsigned short, long, unsigned short *);
> ++void vec_stvexhx (vector bool short, long, signed short *);
> ++void vec_stvexhx (vector bool short, long, unsigned short *);
> ++void vec_stvexhx (vector signed short, long, void *);
> ++void vec_stvexhx (vector unsigned short, long, void *);
> ++void vec_stvexwx (vector float, long, float *);
> ++void vec_stvexwx (vector signed int, long, signed int *);
> ++void vec_stvexwx (vector unsigned int, long, unsigned int *);
> ++void vec_stvexwx (vector bool int, long, signed int *);
> ++void vec_stvexwx (vector bool int, long, unsigned int *);
> ++void vec_stvexwx (vector float, long, void *);
> ++void vec_stvexwx (vector signed int, long, void *);
> ++void vec_stvexwx (vector unsigned int, long, void *);
> ++
> ++vector float vec_lvtlx (long, vector float *);
> ++vector float vec_lvtlx (long, float *);
> ++vector bool int vec_lvtlx (long, vector bool int *);
> ++vector signed int vec_lvtlx (long, vector signed int *);
> ++vector signed int vec_lvtlx (long, signed int *);
> ++vector unsigned int vec_lvtlx (long, vector unsigned int *);
> ++vector unsigned int vec_lvtlx (long, unsigned int *);
> ++vector bool short vec_lvtlx (long, vector bool short *);
> ++vector pixel vec_lvtlx (long, vector pixel *);
> ++vector signed short vec_lvtlx (long, vector signed short *);
> ++vector signed short vec_lvtlx (long, signed short *);
> ++vector unsigned short vec_lvtlx (long, vector unsigned short *);
> ++vector unsigned short vec_lvtlx (long, unsigned short *);
> ++vector bool char vec_lvtlx (long, vector bool char *);
> ++vector signed char vec_lvtlx (long, vector signed char *);
> ++vector signed char vec_lvtlx (long, signed char *);
> ++vector unsigned char vec_lvtlx (long, vector unsigned char *);
> ++vector unsigned char vec_lvtlx (long, unsigned char *);
> ++vector float vec_lvtlxl (long, vector float *);
> ++vector float vec_lvtlxl (long, float *);
> ++vector bool int vec_lvtlxl (long, vector bool int *);
> ++vector signed int vec_lvtlxl (long, vector signed int *);
> ++vector signed int vec_lvtlxl (long, signed int *);
> ++vector unsigned int vec_lvtlxl (long, vector unsigned int *);
> ++vector unsigned int vec_lvtlxl (long, unsigned int *);
> ++vector bool short vec_lvtlxl (long, vector bool short *);
> ++vector pixel vec_lvtlxl (long, vector pixel *);
> ++vector signed short vec_lvtlxl (long, vector signed short *);
> ++vector signed short vec_lvtlxl (long, signed short *);
> ++vector unsigned short vec_lvtlxl (long, vector unsigned short *);
> ++vector unsigned short vec_lvtlxl (long, unsigned short *);
> ++vector bool char vec_lvtlxl (long, vector bool char *);
> ++vector signed char vec_lvtlxl (long, vector signed char *);
> ++vector signed char vec_lvtlxl (long, signed char *);
> ++vector unsigned char vec_lvtlxl (long, vector unsigned char *);
> ++vector unsigned char vec_lvtlxl (long, unsigned char *);
> ++vector float vec_lvtrx (long, vector float *);
> ++vector float vec_lvtrx (long, float *);
> ++vector bool int vec_lvtrx (long, vector bool int *);
> ++vector signed int vec_lvtrx (long, vector signed int *);
> ++vector signed int vec_lvtrx (long, signed int *);
> ++vector unsigned int vec_lvtrx (long, vector unsigned int *);
> ++vector unsigned int vec_lvtrx (long, unsigned int *);
> ++vector bool short vec_lvtrx (long, vector bool short *);
> ++vector pixel vec_lvtrx (long, vector pixel *);
> ++vector signed short vec_lvtrx (long, vector signed short *);
> ++vector signed short vec_lvtrx (long, signed short *);
> ++vector unsigned short vec_lvtrx (long, vector unsigned short *);
> ++vector unsigned short vec_lvtrx (long, unsigned short *);
> ++vector bool char vec_lvtrx (long, vector bool char *);
> ++vector signed char vec_lvtrx (long, vector signed char *);
> ++vector signed char vec_lvtrx (long, signed char *);
> ++vector unsigned char vec_lvtrx (long, vector unsigned char *);
> ++vector unsigned char vec_lvtrx (long, unsigned char *);
> ++vector float vec_lvtrxl (long, vector float *);
> ++vector float vec_lvtrxl (long, float *);
> ++vector bool int vec_lvtrxl (long, vector bool int *);
> ++vector signed int vec_lvtrxl (long, vector signed int *);
> ++vector signed int vec_lvtrxl (long, signed int *);
> ++vector unsigned int vec_lvtrxl (long, vector unsigned int *);
> ++vector unsigned int vec_lvtrxl (long, unsigned int *);
> ++vector bool short vec_lvtrxl (long, vector bool short *);
> ++vector pixel vec_lvtrxl (long, vector pixel *);
> ++vector signed short vec_lvtrxl (long, vector signed short *);
> ++vector signed short vec_lvtrxl (long, signed short *);
> ++vector unsigned short vec_lvtrxl (long, vector unsigned short *);
> ++vector unsigned short vec_lvtrxl (long, unsigned short *);
> ++vector bool char vec_lvtrxl (long, vector bool char *);
> ++vector signed char vec_lvtrxl (long, vector signed char *);
> ++vector signed char vec_lvtrxl (long, signed char *);
> ++vector unsigned char vec_lvtrxl (long, vector unsigned char *);
> ++vector unsigned char vec_lvtrxl (long, unsigned char *);
> ++
> ++void vec_stvflx (vector float, long, vector float *);
> ++void vec_stvflx (vector float, long, float *);
> ++void vec_stvflx (vector bool int, long, vector bool int *);
> ++void vec_stvflx (vector signed int, long, vector signed int *);
> ++void vec_stvflx (vector signed int, long, signed int *);
> ++void vec_stvflx (vector unsigned int, long, vector unsigned int *);
> ++void vec_stvflx (vector unsigned int, long, unsigned int *);
> ++void vec_stvflx (vector bool short, long, vector bool short *);
> ++void vec_stvflx (vector pixel, long, vector pixel *);
> ++void vec_stvflx (vector signed short, long, vector signed short *);
> ++void vec_stvflx (vector signed short, long, signed short *);
> ++void vec_stvflx (vector unsigned short, long, vector unsigned short *);
> ++void vec_stvflx (vector unsigned short, long, unsigned short *);
> ++void vec_stvflx (vector bool char, long, vector bool char *);
> ++void vec_stvflx (vector signed char, long, vector signed char *);
> ++void vec_stvflx (vector signed char, long, signed char *);
> ++void vec_stvflx (vector unsigned char, long, vector unsigned char *);
> ++void vec_stvflx (vector unsigned char, long, unsigned char *);
> ++void vec_stvflxl (vector float, long, vector float *);
> ++void vec_stvflxl (vector float, long, float *);
> ++void vec_stvflxl (vector bool int, long, vector bool int *);
> ++void vec_stvflxl (vector signed int, long, vector signed int *);
> ++void vec_stvflxl (vector signed int, long, signed int *);
> ++void vec_stvflxl (vector unsigned int, long, vector unsigned int *);
> ++void vec_stvflxl (vector unsigned int, long, unsigned int *);
> ++void vec_stvflxl (vector bool short, long, vector bool short *);
> ++void vec_stvflxl (vector pixel, long, vector pixel *);
> ++void vec_stvflxl (vector signed short, long, vector signed short *);
> ++void vec_stvflxl (vector signed short, long, signed short *);
> ++void vec_stvflxl (vector unsigned short, long, vector unsigned short *);
> ++void vec_stvflxl (vector unsigned short, long, unsigned short *);
> ++void vec_stvflxl (vector bool char, long, vector bool char *);
> ++void vec_stvflxl (vector signed char, long, vector signed char *);
> ++void vec_stvflxl (vector signed char, long, signed char *);
> ++void vec_stvflxl (vector unsigned char, long, vector unsigned char *);
> ++void vec_stvflxl (vector unsigned char, long, unsigned char *);
> ++void vec_stvfrx (vector float, long, vector float *);
> ++void vec_stvfrx (vector float, long, float *);
> ++void vec_stvfrx (vector bool int, long, vector bool int *);
> ++void vec_stvfrx (vector signed int, long, vector signed int *);
> ++void vec_stvfrx (vector signed int, long, signed int *);
> ++void vec_stvfrx (vector unsigned int, long, vector unsigned int *);
> ++void vec_stvfrx (vector unsigned int, long, unsigned int *);
> ++void vec_stvfrx (vector bool short, long, vector bool short *);
> ++void vec_stvfrx (vector pixel, long, vector pixel *);
> ++void vec_stvfrx (vector signed short, long, vector signed short *);
> ++void vec_stvfrx (vector signed short, long, signed short *);
> ++void vec_stvfrx (vector unsigned short, long, vector unsigned short *);
> ++void vec_stvfrx (vector unsigned short, long, unsigned short *);
> ++void vec_stvfrx (vector bool char, long, vector bool char *);
> ++void vec_stvfrx (vector signed char, long, vector signed char *);
> ++void vec_stvfrx (vector signed char, long, signed char *);
> ++void vec_stvfrx (vector unsigned char, long, vector unsigned char *);
> ++void vec_stvfrx (vector unsigned char, long, unsigned char *);
> ++void vec_stvfrxl (vector float, long, vector float *);
> ++void vec_stvfrxl (vector float, long, float *);
> ++void vec_stvfrxl (vector bool int, long, vector bool int *);
> ++void vec_stvfrxl (vector signed int, long, vector signed int *);
> ++void vec_stvfrxl (vector signed int, long, signed int *);
> ++void vec_stvfrxl (vector unsigned int, long, vector unsigned int *);
> ++void vec_stvfrxl (vector unsigned int, long, unsigned int *);
> ++void vec_stvfrxl (vector bool short, long, vector bool short *);
> ++void vec_stvfrxl (vector pixel, long, vector pixel *);
> ++void vec_stvfrxl (vector signed short, long, vector signed short *);
> ++void vec_stvfrxl (vector signed short, long, signed short *);
> ++void vec_stvfrxl (vector unsigned short, long, vector unsigned short *);
> ++void vec_stvfrxl (vector unsigned short, long, unsigned short *);
> ++void vec_stvfrxl (vector bool char, long, vector bool char *);
> ++void vec_stvfrxl (vector signed char, long, vector signed char *);
> ++void vec_stvfrxl (vector signed char, long, signed char *);
> ++void vec_stvfrxl (vector unsigned char, long, vector unsigned char *);
> ++void vec_stvfrxl (vector unsigned char, long, unsigned char *);
> ++
> ++vector float vec_lvswx (long, vector float *);
> ++vector float vec_lvswx (long, float *);
> ++vector bool int vec_lvswx (long, vector bool int *);
> ++vector signed int vec_lvswx (long, vector signed int *);
> ++vector signed int vec_lvswx (long, signed int *);
> ++vector unsigned int vec_lvswx (long, vector unsigned int *);
> ++vector unsigned int vec_lvswx (long, unsigned int *);
> ++vector bool short vec_lvswx (long, vector bool short *);
> ++vector pixel vec_lvswx (long, vector pixel *);
> ++vector signed short vec_lvswx (long, vector signed short *);
> ++vector signed short vec_lvswx (long, signed short *);
> ++vector unsigned short vec_lvswx (long, vector unsigned short *);
> ++vector unsigned short vec_lvswx (long, unsigned short *);
> ++vector bool char vec_lvswx (long, vector bool char *);
> ++vector signed char vec_lvswx (long, vector signed char *);
> ++vector signed char vec_lvswx (long, signed char *);
> ++vector unsigned char vec_lvswx (long, vector unsigned char *);
> ++vector unsigned char vec_lvswx (long, unsigned char *);
> ++vector float vec_lvswxl (long, vector float *);
> ++vector float vec_lvswxl (long, float *);
> ++vector bool int vec_lvswxl (long, vector bool int *);
> ++vector signed int vec_lvswxl (long, vector signed int *);
> ++vector signed int vec_lvswxl (long, signed int *);
> ++vector unsigned int vec_lvswxl (long, vector unsigned int *);
> ++vector unsigned int vec_lvswxl (long, unsigned int *);
> ++vector bool short vec_lvswxl (long, vector bool short *);
> ++vector pixel vec_lvswxl (long, vector pixel *);
> ++vector signed short vec_lvswxl (long, vector signed short *);
> ++vector signed short vec_lvswxl (long, signed short *);
> ++vector unsigned short vec_lvswxl (long, vector unsigned short *);
> ++vector unsigned short vec_lvswxl (long, unsigned short *);
> ++vector bool char vec_lvswxl (long, vector bool char *);
> ++vector signed char vec_lvswxl (long, vector signed char *);
> ++vector signed char vec_lvswxl (long, signed char *);
> ++vector unsigned char vec_lvswxl (long, vector unsigned char *);
> ++vector unsigned char vec_lvswxl (long, unsigned char *);
> ++
> ++void vec_stvswx (vector float, long, vector float *);
> ++void vec_stvswx (vector float, long, float *);
> ++void vec_stvswx (vector bool int, long, vector bool int *);
> ++void vec_stvswx (vector signed int, long, vector signed int *);
> ++void vec_stvswx (vector signed int, long, signed int *);
> ++void vec_stvswx (vector unsigned int, long, vector unsigned int *);
> ++void vec_stvswx (vector unsigned int, long, unsigned int *);
> ++void vec_stvswx (vector bool short, long, vector bool short *);
> ++void vec_stvswx (vector pixel, long, vector pixel *);
> ++void vec_stvswx (vector signed short, long, vector signed short *);
> ++void vec_stvswx (vector signed short, long, signed short *);
> ++void vec_stvswx (vector unsigned short, long, vector unsigned short *);
> ++void vec_stvswx (vector unsigned short, long, unsigned short *);
> ++void vec_stvswx (vector bool char, long, vector bool char *);
> ++void vec_stvswx (vector signed char, long, vector signed char *);
> ++void vec_stvswx (vector signed char, long, signed char *);
> ++void vec_stvswx (vector unsigned char, long, vector unsigned char *);
> ++void vec_stvswx (vector unsigned char, long, unsigned char *);
> ++void vec_stvswxl (vector float, long, vector float *);
> ++void vec_stvswxl (vector float, long, float *);
> ++void vec_stvswxl (vector bool int, long, vector bool int *);
> ++void vec_stvswxl (vector signed int, long, vector signed int *);
> ++void vec_stvswxl (vector signed int, long, signed int *);
> ++void vec_stvswxl (vector unsigned int, long, vector unsigned int *);
> ++void vec_stvswxl (vector unsigned int, long, unsigned int *);
> ++void vec_stvswxl (vector bool short, long, vector bool short *);
> ++void vec_stvswxl (vector pixel, long, vector pixel *);
> ++void vec_stvswxl (vector signed short, long, vector signed short *);
> ++void vec_stvswxl (vector signed short, long, signed short *);
> ++void vec_stvswxl (vector unsigned short, long, vector unsigned short *);
> ++void vec_stvswxl (vector unsigned short, long, unsigned short *);
> ++void vec_stvswxl (vector bool char, long, vector bool char *);
> ++void vec_stvswxl (vector signed char, long, vector signed char *);
> ++void vec_stvswxl (vector signed char, long, signed char *);
> ++void vec_stvswxl (vector unsigned char, long, vector unsigned char *);
> ++void vec_stvswxl (vector unsigned char, long, unsigned char *);
> ++
> ++vector float vec_lvsm (long, vector float *);
> ++vector float vec_lvsm (long, float *);
> ++vector bool int vec_lvsm (long, vector bool int *);
> ++vector signed int vec_lvsm (long, vector signed int *);
> ++vector signed int vec_lvsm (long, signed int *);
> ++vector unsigned int vec_lvsm (long, vector unsigned int *);
> ++vector unsigned int vec_lvsm (long, unsigned int *);
> ++vector bool short vec_lvsm (long, vector bool short *);
> ++vector pixel vec_lvsm (long, vector pixel *);
> ++vector signed short vec_lvsm (long, vector signed short *);
> ++vector signed short vec_lvsm (long, signed short *);
> ++vector unsigned short vec_lvsm (long, vector unsigned short *);
> ++vector unsigned short vec_lvsm (long, unsigned short *);
> ++vector bool char vec_lvsm (long, vector bool char *);
> ++vector signed char vec_lvsm (long, vector signed char *);
> ++vector signed char vec_lvsm (long, signed char *);
> ++vector unsigned char vec_lvsm (long, vector unsigned char *);
> ++vector unsigned char vec_lvsm (long, unsigned char *);
> ++@end smallexample
> ++
> + GCC provides a few other builtins on Powerpc to access certain instructions:
> + @smallexample
> + float __builtin_recipdivf (float, float);
> +diff -ruN gcc-4.6.2-orig/gcc/doc/invoke.texi gcc-4.6.2/gcc/doc/invoke.texi
> +--- gcc-4.6.2-orig/gcc/doc/invoke.texi 2011-10-24 07:22:21.000000000 -0500
> ++++ gcc-4.6.2/gcc/doc/invoke.texi 2012-03-06 12:56:49.402039002 -0600
> +@@ -770,7 +770,7 @@
> + -mcmodel=@var{code-model} @gol
> + -mpower -mno-power -mpower2 -mno-power2 @gol
> + -mpowerpc -mpowerpc64 -mno-powerpc @gol
> +--maltivec -mno-altivec @gol
> ++-maltivec -mno-altivec -maltivec2 -mno-altivec2 @gol
> + -mpowerpc-gpopt -mno-powerpc-gpopt @gol
> + -mpowerpc-gfxopt -mno-powerpc-gfxopt @gol
> + -mmfcrf -mno-mfcrf -mpopcntb -mno-popcntb -mpopcntd -mno-popcntd @gol
> +@@ -15536,16 +15536,21 @@
> + The @option{-mpopcntb} option allows GCC to generate the popcount and
> + double precision FP reciprocal estimate instruction implemented on the
> + POWER5 processor and other processors that support the PowerPC V2.02
> +-architecture.
> +-The @option{-mpopcntd} option allows GCC to generate the popcount
> +-instruction implemented on the POWER7 processor and other processors
> +-that support the PowerPC V2.06 architecture.
> ++architecture. On the e5500 and e6500 processors, only the popcount
> ++instruction is generated.
> ++The @option{-mpopcntd} option allows GCC to generate the popcount and
> ++double word to FP conversion instructions implemented on the POWER7
> ++processor and other processors that support the PowerPC V2.06
> ++architecture. On the e5500 and e6500 processors, only the popcount
> ++instruction is generated.
> + The @option{-mfprnd} option allows GCC to generate the FP round to
> + integer instructions implemented on the POWER5+ processor and other
> + processors that support the PowerPC V2.03 architecture.
> + The @option{-mcmpb} option allows GCC to generate the compare bytes
> +-instruction implemented on the POWER6 processor and other processors
> +-that support the PowerPC V2.05 architecture.
> ++and copy sign instructions implemented on the POWER6 processor and
> ++other processors that support the PowerPC V2.05 architecture. On the
> ++e5500 and e6500 processors, only the compare bytes instruction is
> ++generated.
> + The @option{-mmfpgpr} option allows GCC to generate the FP move to/from
> + general purpose register instructions implemented on the POWER6X
> + processor and other processors that support the extended PowerPC V2.05
> +@@ -15592,11 +15597,13 @@
> + @samp{603e}, @samp{604}, @samp{604e}, @samp{620}, @samp{630}, @samp{740},
> + @samp{7400}, @samp{7450}, @samp{750}, @samp{801}, @samp{821}, @samp{823},
> + @samp{860}, @samp{970}, @samp{8540}, @samp{a2}, @samp{e300c2},
> +-@samp{e300c3}, @samp{e500mc}, @samp{e500mc64}, @samp{ec603e}, @samp{G3},
> +-@samp{G4}, @samp{G5}, @samp{titan}, @samp{power}, @samp{power2}, @samp{power3},
> +-@samp{power4}, @samp{power5}, @samp{power5+}, @samp{power6}, @samp{power6x},
> +-@samp{power7}, @samp{common}, @samp{powerpc}, @samp{powerpc64}, @samp{rios},
> +-@samp{rios1}, @samp{rios2}, @samp{rsc}, and @samp{rs64}.
> ++@samp{e300c3}, @samp{e500mc}, @samp{e500mc64}, @samp{e5500},
> ++@samp{e6500}, @samp{ec603e}, @samp{G3}, @samp{G4}, @samp{G5},
> ++@samp{titan}, @samp{power}, @samp{power2}, @samp{power3},
> ++@samp{power4}, @samp{power5}, @samp{power5+}, @samp{power6},
> ++@samp{power6x}, @samp{power7}, @samp{common}, @samp{powerpc},
> ++@samp{powerpc64}, @samp{rios}, @samp{rios1}, @samp{rios2}, @samp{rsc},
> ++and @samp{rs64}.
> +
> + @option{-mcpu=common} selects a completely generic processor. Code
> + generated under this option will run on any POWER or PowerPC processor.
> +@@ -15617,10 +15624,11 @@
> + The @option{-mcpu} options automatically enable or disable the
> + following options:
> +
> +-@gccoptlist{-maltivec -mfprnd -mhard-float -mmfcrf -mmultiple @gol
> +--mnew-mnemonics -mpopcntb -mpopcntd -mpower -mpower2 -mpowerpc64 @gol
> +--mpowerpc-gpopt -mpowerpc-gfxopt -msingle-float -mdouble-float @gol
> +--msimple-fpu -mstring -mmulhw -mdlmzb -mmfpgpr -mvsx}
> ++@gccoptlist{-maltivec -maltivec2 -mfprnd -mhard-float -mmfcrf
> ++-mmultiple @gol -mnew-mnemonics -mpopcntb -mpopcntd -mpower -mpower2
> ++-mpowerpc64 @gol -mpowerpc-gpopt -mpowerpc-gfxopt -msingle-float
> ++-mdouble-float @gol -msimple-fpu -mstring -mmulhw -mdlmzb -mmfpgpr
> ++-mvsx}
> +
> + The particular options set for any particular CPU will vary between
> + compiler versions, depending on what setting seems to produce optimal
> +@@ -15671,6 +15679,16 @@
> + @option{-mabi=altivec} to adjust the current ABI with AltiVec ABI
> + enhancements.
> +
> ++@item -maltivec2
> ++@itemx -mno-altivec2
> ++@opindex maltivec2
> ++@opindex mno-altivec2
> ++Generate code that uses (does not use) AltiVec2 instructions, and also
> ++enable the use of built-in functions that allow more direct access to
> ++the AltiVec2 instruction set. You may also need to set
> ++@option{-mabi=altivec} to adjust the current ABI with AltiVec ABI
> ++enhancements.
> ++
> + @item -mvrsave
> + @itemx -mno-vrsave
> + @opindex mvrsave
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-10.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-10.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-10.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-10.c 2012-03-06 12:31:05.152039004 -0600
> +@@ -0,0 +1,66 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "lvtlx" 37 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vsc lc1(long a, void *p) { return __builtin_altivec_lvtlx (a,p); }
> ++vsf llx01(long a, vsf *p) { return __builtin_vec_lvtlx (a,p); }
> ++vsf llx02(long a, sf *p) { return __builtin_vec_lvtlx (a,p); }
> ++vbi llx03(long a, vbi *p) { return __builtin_vec_lvtlx (a,p); }
> ++vsi llx04(long a, vsi *p) { return __builtin_vec_lvtlx (a,p); }
> ++vsi llx05(long a, si *p) { return __builtin_vec_lvtlx (a,p); }
> ++vui llx06(long a, vui *p) { return __builtin_vec_lvtlx (a,p); }
> ++vui llx07(long a, ui *p) { return __builtin_vec_lvtlx (a,p); }
> ++vbs llx08(long a, vbs *p) { return __builtin_vec_lvtlx (a,p); }
> ++vp llx09(long a, vp *p) { return __builtin_vec_lvtlx (a,p); }
> ++vss llx10(long a, vss *p) { return __builtin_vec_lvtlx (a,p); }
> ++vss llx11(long a, ss *p) { return __builtin_vec_lvtlx (a,p); }
> ++vus llx12(long a, vus *p) { return __builtin_vec_lvtlx (a,p); }
> ++vus llx13(long a, us *p) { return __builtin_vec_lvtlx (a,p); }
> ++vbc llx14(long a, vbc *p) { return __builtin_vec_lvtlx (a,p); }
> ++vsc llx15(long a, vsc *p) { return __builtin_vec_lvtlx (a,p); }
> ++vsc llx16(long a, sc *p) { return __builtin_vec_lvtlx (a,p); }
> ++vuc llx17(long a, vuc *p) { return __builtin_vec_lvtlx (a,p); }
> ++vuc llx18(long a, uc *p) { return __builtin_vec_lvtlx (a,p); }
> ++vsf Dllx01(long a, vsf *p) { return vec_lvtlx (a,p); }
> ++vsf Dllx02(long a, sf *p) { return vec_lvtlx (a,p); }
> ++vbi Dllx03(long a, vbi *p) { return vec_lvtlx (a,p); }
> ++vsi Dllx04(long a, vsi *p) { return vec_lvtlx (a,p); }
> ++vsi Dllx05(long a, si *p) { return vec_lvtlx (a,p); }
> ++vui Dllx06(long a, vui *p) { return vec_lvtlx (a,p); }
> ++vui Dllx07(long a, ui *p) { return vec_lvtlx (a,p); }
> ++vbs Dllx08(long a, vbs *p) { return vec_lvtlx (a,p); }
> ++vp Dllx09(long a, vp *p) { return vec_lvtlx (a,p); }
> ++vss Dllx10(long a, vss *p) { return vec_lvtlx (a,p); }
> ++vss Dllx11(long a, ss *p) { return vec_lvtlx (a,p); }
> ++vus Dllx12(long a, vus *p) { return vec_lvtlx (a,p); }
> ++vus Dllx13(long a, us *p) { return vec_lvtlx (a,p); }
> ++vbc Dllx14(long a, vbc *p) { return vec_lvtlx (a,p); }
> ++vsc Dllx15(long a, vsc *p) { return vec_lvtlx (a,p); }
> ++vsc Dllx16(long a, sc *p) { return vec_lvtlx (a,p); }
> ++vuc Dllx17(long a, vuc *p) { return vec_lvtlx (a,p); }
> ++vuc Dllx18(long a, uc *p) { return vec_lvtlx (a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-11.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-11.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-11.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-11.c 2012-03-06 12:31:05.153039004 -0600
> +@@ -0,0 +1,66 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "lvtlxl" 37 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vsc lc2(long a, void *p) { return __builtin_altivec_lvtlxl (a,p); }
> ++vsf llxl01(long a, vsf *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vsf llxl02(long a, sf *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vbi llxl03(long a, vbi *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vsi llxl04(long a, vsi *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vsi llxl05(long a, si *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vui llxl06(long a, vui *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vui llxl07(long a, ui *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vbs llxl08(long a, vbs *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vp llxl09(long a, vp *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vss llxl10(long a, vss *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vss llxl11(long a, ss *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vus llxl12(long a, vus *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vus llxl13(long a, us *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vbc llxl14(long a, vbc *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vsc llxl15(long a, vsc *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vsc llxl16(long a, sc *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vuc llxl17(long a, vuc *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vuc llxl18(long a, uc *p) { return __builtin_vec_lvtlxl (a,p); }
> ++vsf Dllxl01(long a, vsf *p) { return vec_lvtlxl (a,p); }
> ++vsf Dllxl02(long a, sf *p) { return vec_lvtlxl (a,p); }
> ++vbi Dllxl03(long a, vbi *p) { return vec_lvtlxl (a,p); }
> ++vsi Dllxl04(long a, vsi *p) { return vec_lvtlxl (a,p); }
> ++vsi Dllxl05(long a, si *p) { return vec_lvtlxl (a,p); }
> ++vui Dllxl06(long a, vui *p) { return vec_lvtlxl (a,p); }
> ++vui Dllxl07(long a, ui *p) { return vec_lvtlxl (a,p); }
> ++vbs Dllxl08(long a, vbs *p) { return vec_lvtlxl (a,p); }
> ++vp Dllxl09(long a, vp *p) { return vec_lvtlxl (a,p); }
> ++vss Dllxl10(long a, vss *p) { return vec_lvtlxl (a,p); }
> ++vss Dllxl11(long a, ss *p) { return vec_lvtlxl (a,p); }
> ++vus Dllxl12(long a, vus *p) { return vec_lvtlxl (a,p); }
> ++vus Dllxl13(long a, us *p) { return vec_lvtlxl (a,p); }
> ++vbc Dllxl14(long a, vbc *p) { return vec_lvtlxl (a,p); }
> ++vsc Dllxl15(long a, vsc *p) { return vec_lvtlxl (a,p); }
> ++vsc Dllxl16(long a, sc *p) { return vec_lvtlxl (a,p); }
> ++vuc Dllxl17(long a, vuc *p) { return vec_lvtlxl (a,p); }
> ++vuc Dllxl18(long a, uc *p) { return vec_lvtlxl (a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-12.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-12.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-12.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-12.c 2012-03-06 12:31:05.153039004 -0600
> +@@ -0,0 +1,66 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "lvtrx" 37 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vsc lc3(long a, void *p) { return __builtin_altivec_lvtrx (a,p); }
> ++vsf lrx01(long a, vsf *p) { return __builtin_vec_lvtrx (a,p); }
> ++vsf lrx02(long a, sf *p) { return __builtin_vec_lvtrx (a,p); }
> ++vbi lrx03(long a, vbi *p) { return __builtin_vec_lvtrx (a,p); }
> ++vsi lrx04(long a, vsi *p) { return __builtin_vec_lvtrx (a,p); }
> ++vsi lrx05(long a, si *p) { return __builtin_vec_lvtrx (a,p); }
> ++vui lrx06(long a, vui *p) { return __builtin_vec_lvtrx (a,p); }
> ++vui lrx07(long a, ui *p) { return __builtin_vec_lvtrx (a,p); }
> ++vbs lrx08(long a, vbs *p) { return __builtin_vec_lvtrx (a,p); }
> ++vp lrx09(long a, vp *p) { return __builtin_vec_lvtrx (a,p); }
> ++vss lrx10(long a, vss *p) { return __builtin_vec_lvtrx (a,p); }
> ++vss lrx11(long a, ss *p) { return __builtin_vec_lvtrx (a,p); }
> ++vus lrx12(long a, vus *p) { return __builtin_vec_lvtrx (a,p); }
> ++vus lrx13(long a, us *p) { return __builtin_vec_lvtrx (a,p); }
> ++vbc lrx14(long a, vbc *p) { return __builtin_vec_lvtrx (a,p); }
> ++vsc lrx15(long a, vsc *p) { return __builtin_vec_lvtrx (a,p); }
> ++vsc lrx16(long a, sc *p) { return __builtin_vec_lvtrx (a,p); }
> ++vuc lrx17(long a, vuc *p) { return __builtin_vec_lvtrx (a,p); }
> ++vuc lrx18(long a, uc *p) { return __builtin_vec_lvtrx (a,p); }
> ++vsf Dlrx01(long a, vsf *p) { return vec_lvtrx (a,p); }
> ++vsf Dlrx02(long a, sf *p) { return vec_lvtrx (a,p); }
> ++vbi Dlrx03(long a, vbi *p) { return vec_lvtrx (a,p); }
> ++vsi Dlrx04(long a, vsi *p) { return vec_lvtrx (a,p); }
> ++vsi Dlrx05(long a, si *p) { return vec_lvtrx (a,p); }
> ++vui Dlrx06(long a, vui *p) { return vec_lvtrx (a,p); }
> ++vui Dlrx07(long a, ui *p) { return vec_lvtrx (a,p); }
> ++vbs Dlrx08(long a, vbs *p) { return vec_lvtrx (a,p); }
> ++vp Dlrx09(long a, vp *p) { return vec_lvtrx (a,p); }
> ++vss Dlrx10(long a, vss *p) { return vec_lvtrx (a,p); }
> ++vss Dlrx11(long a, ss *p) { return vec_lvtrx (a,p); }
> ++vus Dlrx12(long a, vus *p) { return vec_lvtrx (a,p); }
> ++vus Dlrx13(long a, us *p) { return vec_lvtrx (a,p); }
> ++vbc Dlrx14(long a, vbc *p) { return vec_lvtrx (a,p); }
> ++vsc Dlrx15(long a, vsc *p) { return vec_lvtrx (a,p); }
> ++vsc Dlrx16(long a, sc *p) { return vec_lvtrx (a,p); }
> ++vuc Dlrx17(long a, vuc *p) { return vec_lvtrx (a,p); }
> ++vuc Dlrx18(long a, uc *p) { return vec_lvtrx (a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-13.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-13.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-13.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-13.c 2012-03-06 12:31:05.153039004 -0600
> +@@ -0,0 +1,66 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "lvtrxl" 37 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vsc lc4(long a, void *p) { return __builtin_altivec_lvtrxl (a,p); }
> ++vsf lrxl01(long a, vsf *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vsf lrxl02(long a, sf *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vbi lrxl03(long a, vbi *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vsi lrxl04(long a, vsi *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vsi lrxl05(long a, si *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vui lrxl06(long a, vui *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vui lrxl07(long a, ui *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vbs lrxl08(long a, vbs *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vp lrxl09(long a, vp *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vss lrxl10(long a, vss *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vss lrxl11(long a, ss *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vus lrxl12(long a, vus *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vus lrxl13(long a, us *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vbc lrxl14(long a, vbc *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vsc lrxl15(long a, vsc *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vsc lrxl16(long a, sc *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vuc lrxl17(long a, vuc *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vuc lrxl18(long a, uc *p) { return __builtin_vec_lvtrxl (a,p); }
> ++vsf Dlrxl01(long a, vsf *p) { return vec_lvtrxl (a,p); }
> ++vsf Dlrxl02(long a, sf *p) { return vec_lvtrxl (a,p); }
> ++vbi Dlrxl03(long a, vbi *p) { return vec_lvtrxl (a,p); }
> ++vsi Dlrxl04(long a, vsi *p) { return vec_lvtrxl (a,p); }
> ++vsi Dlrxl05(long a, si *p) { return vec_lvtrxl (a,p); }
> ++vui Dlrxl06(long a, vui *p) { return vec_lvtrxl (a,p); }
> ++vui Dlrxl07(long a, ui *p) { return vec_lvtrxl (a,p); }
> ++vbs Dlrxl08(long a, vbs *p) { return vec_lvtrxl (a,p); }
> ++vp Dlrxl09(long a, vp *p) { return vec_lvtrxl (a,p); }
> ++vss Dlrxl10(long a, vss *p) { return vec_lvtrxl (a,p); }
> ++vss Dlrxl11(long a, ss *p) { return vec_lvtrxl (a,p); }
> ++vus Dlrxl12(long a, vus *p) { return vec_lvtrxl (a,p); }
> ++vus Dlrxl13(long a, us *p) { return vec_lvtrxl (a,p); }
> ++vbc Dlrxl14(long a, vbc *p) { return vec_lvtrxl (a,p); }
> ++vsc Dlrxl15(long a, vsc *p) { return vec_lvtrxl (a,p); }
> ++vsc Dlrxl16(long a, sc *p) { return vec_lvtrxl (a,p); }
> ++vuc Dlrxl17(long a, vuc *p) { return vec_lvtrxl (a,p); }
> ++vuc Dlrxl18(long a, uc *p) { return vec_lvtrxl (a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-14.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-14.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-14.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-14.c 2012-03-06 12:31:05.154039003 -0600
> +@@ -0,0 +1,66 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "stvflx" 37 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++void sc1(vsc v, long a, void *p) { __builtin_altivec_stvflx (v,a,p); }
> ++void slx01(vsf v, long a, vsf *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx02(vsf v, long a, sf *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx03(vbi v, long a, vbi *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx04(vsi v, long a, vsi *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx05(vsi v, long a, si *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx06(vui v, long a, vui *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx07(vui v, long a, ui *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx08(vbs v, long a, vbs *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx09(vp v, long a, vp *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx10(vss v, long a, vss *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx11(vss v, long a, ss *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx12(vus v, long a, vus *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx13(vus v, long a, us *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx14(vbc v, long a, vbc *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx15(vsc v, long a, vsc *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx16(vsc v, long a, sc *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx17(vuc v, long a, vuc *p) { __builtin_vec_stvflx (v,a,p); }
> ++void slx18(vuc v, long a, uc *p) { __builtin_vec_stvflx (v,a,p); }
> ++void Dslx01(vsf v, long a, vsf *p) { vec_stvflx (v,a,p); }
> ++void Dslx02(vsf v, long a, sf *p) { vec_stvflx (v,a,p); }
> ++void Dslx03(vbi v, long a, vbi *p) { vec_stvflx (v,a,p); }
> ++void Dslx04(vsi v, long a, vsi *p) { vec_stvflx (v,a,p); }
> ++void Dslx05(vsi v, long a, si *p) { vec_stvflx (v,a,p); }
> ++void Dslx06(vui v, long a, vui *p) { vec_stvflx (v,a,p); }
> ++void Dslx07(vui v, long a, ui *p) { vec_stvflx (v,a,p); }
> ++void Dslx08(vbs v, long a, vbs *p) { vec_stvflx (v,a,p); }
> ++void Dslx09(vp v, long a, vp *p) { vec_stvflx (v,a,p); }
> ++void Dslx10(vss v, long a, vss *p) { vec_stvflx (v,a,p); }
> ++void Dslx11(vss v, long a, ss *p) { vec_stvflx (v,a,p); }
> ++void Dslx12(vus v, long a, vus *p) { vec_stvflx (v,a,p); }
> ++void Dslx13(vus v, long a, us *p) { vec_stvflx (v,a,p); }
> ++void Dslx14(vbc v, long a, vbc *p) { vec_stvflx (v,a,p); }
> ++void Dslx15(vsc v, long a, vsc *p) { vec_stvflx (v,a,p); }
> ++void Dslx16(vsc v, long a, sc *p) { vec_stvflx (v,a,p); }
> ++void Dslx17(vuc v, long a, vuc *p) { vec_stvflx (v,a,p); }
> ++void Dslx18(vuc v, long a, uc *p) { vec_stvflx (v,a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-15.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-15.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-15.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-15.c 2012-03-06 12:31:05.154039003 -0600
> +@@ -0,0 +1,66 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "stvflxl" 37 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++void sc2(vsc v, long a, void *p) { __builtin_altivec_stvflxl (v,a,p); }
> ++void slxl01(vsf v, long a, vsf *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl02(vsf v, long a, sf *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl03(vbi v, long a, vbi *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl04(vsi v, long a, vsi *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl05(vsi v, long a, si *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl06(vui v, long a, vui *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl07(vui v, long a, ui *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl08(vbs v, long a, vbs *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl09(vp v, long a, vp *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl10(vss v, long a, vss *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl11(vss v, long a, ss *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl12(vus v, long a, vus *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl13(vus v, long a, us *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl14(vbc v, long a, vbc *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl15(vsc v, long a, vsc *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl16(vsc v, long a, sc *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl17(vuc v, long a, vuc *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void slxl18(vuc v, long a, uc *p) { __builtin_vec_stvflxl (v,a,p); }
> ++void Dslxl01(vsf v, long a, vsf *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl02(vsf v, long a, sf *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl03(vbi v, long a, vbi *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl04(vsi v, long a, vsi *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl05(vsi v, long a, si *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl06(vui v, long a, vui *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl07(vui v, long a, ui *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl08(vbs v, long a, vbs *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl09(vp v, long a, vp *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl10(vss v, long a, vss *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl11(vss v, long a, ss *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl12(vus v, long a, vus *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl13(vus v, long a, us *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl14(vbc v, long a, vbc *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl15(vsc v, long a, vsc *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl16(vsc v, long a, sc *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl17(vuc v, long a, vuc *p) { vec_stvflxl (v,a,p); }
> ++void Dslxl18(vuc v, long a, uc *p) { vec_stvflxl (v,a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-16.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-16.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-16.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-16.c 2012-03-06 12:31:05.154039003 -0600
> +@@ -0,0 +1,66 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "stvfrx" 37 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++void sc3(vsc v, long a, void *p) { __builtin_altivec_stvfrx (v,a,p); }
> ++void srx01(vsf v, long a, vsf *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx02(vsf v, long a, sf *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx03(vbi v, long a, vbi *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx04(vsi v, long a, vsi *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx05(vsi v, long a, si *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx06(vui v, long a, vui *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx07(vui v, long a, ui *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx08(vbs v, long a, vbs *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx09(vp v, long a, vp *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx10(vss v, long a, vss *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx11(vss v, long a, ss *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx12(vus v, long a, vus *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx13(vus v, long a, us *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx14(vbc v, long a, vbc *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx15(vsc v, long a, vsc *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx16(vsc v, long a, sc *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx17(vuc v, long a, vuc *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void srx18(vuc v, long a, uc *p) { __builtin_vec_stvfrx (v,a,p); }
> ++void Dsrx01(vsf v, long a, vsf *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx02(vsf v, long a, sf *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx03(vbi v, long a, vbi *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx04(vsi v, long a, vsi *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx05(vsi v, long a, si *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx06(vui v, long a, vui *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx07(vui v, long a, ui *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx08(vbs v, long a, vbs *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx09(vp v, long a, vp *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx10(vss v, long a, vss *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx11(vss v, long a, ss *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx12(vus v, long a, vus *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx13(vus v, long a, us *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx14(vbc v, long a, vbc *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx15(vsc v, long a, vsc *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx16(vsc v, long a, sc *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx17(vuc v, long a, vuc *p) { vec_stvfrx (v,a,p); }
> ++void Dsrx18(vuc v, long a, uc *p) { vec_stvfrx (v,a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-17.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-17.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-17.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-17.c 2012-03-06 12:31:05.155039001 -0600
> +@@ -0,0 +1,66 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "stvfrxl" 37 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++void sc4(vsc v, long a, void *p) { __builtin_altivec_stvfrxl (v,a,p); }
> ++void srxl01(vsf v, long a, vsf *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl02(vsf v, long a, sf *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl03(vbi v, long a, vbi *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl04(vsi v, long a, vsi *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl05(vsi v, long a, si *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl06(vui v, long a, vui *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl07(vui v, long a, ui *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl08(vbs v, long a, vbs *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl09(vp v, long a, vp *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl10(vss v, long a, vss *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl11(vss v, long a, ss *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl12(vus v, long a, vus *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl13(vus v, long a, us *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl14(vbc v, long a, vbc *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl15(vsc v, long a, vsc *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl16(vsc v, long a, sc *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl17(vuc v, long a, vuc *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void srxl18(vuc v, long a, uc *p) { __builtin_vec_stvfrxl (v,a,p); }
> ++void Dsrxl01(vsf v, long a, vsf *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl02(vsf v, long a, sf *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl03(vbi v, long a, vbi *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl04(vsi v, long a, vsi *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl05(vsi v, long a, si *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl06(vui v, long a, vui *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl07(vui v, long a, ui *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl08(vbs v, long a, vbs *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl09(vp v, long a, vp *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl10(vss v, long a, vss *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl11(vss v, long a, ss *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl12(vus v, long a, vus *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl13(vus v, long a, us *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl14(vbc v, long a, vbc *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl15(vsc v, long a, vsc *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl16(vsc v, long a, sc *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl17(vuc v, long a, vuc *p) { vec_stvfrxl (v,a,p); }
> ++void Dsrxl18(vuc v, long a, uc *p) { vec_stvfrxl (v,a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-18.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-18.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-18.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-18.c 2012-03-06 12:31:05.155039001 -0600
> +@@ -0,0 +1,66 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "lvswx" 37 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vsc ls1(long a, void *p) { return __builtin_altivec_lvswx (a,p); }
> ++vsf ls01(long a, vsf *p) { return __builtin_vec_lvswx (a,p); }
> ++vsf ls02(long a, sf *p) { return __builtin_vec_lvswx (a,p); }
> ++vbi ls03(long a, vbi *p) { return __builtin_vec_lvswx (a,p); }
> ++vsi ls04(long a, vsi *p) { return __builtin_vec_lvswx (a,p); }
> ++vsi ls05(long a, si *p) { return __builtin_vec_lvswx (a,p); }
> ++vui ls06(long a, vui *p) { return __builtin_vec_lvswx (a,p); }
> ++vui ls07(long a, ui *p) { return __builtin_vec_lvswx (a,p); }
> ++vbs ls08(long a, vbs *p) { return __builtin_vec_lvswx (a,p); }
> ++vp ls09(long a, vp *p) { return __builtin_vec_lvswx (a,p); }
> ++vss ls10(long a, vss *p) { return __builtin_vec_lvswx (a,p); }
> ++vss ls11(long a, ss *p) { return __builtin_vec_lvswx (a,p); }
> ++vus ls12(long a, vus *p) { return __builtin_vec_lvswx (a,p); }
> ++vus ls13(long a, us *p) { return __builtin_vec_lvswx (a,p); }
> ++vbc ls14(long a, vbc *p) { return __builtin_vec_lvswx (a,p); }
> ++vsc ls15(long a, vsc *p) { return __builtin_vec_lvswx (a,p); }
> ++vsc ls16(long a, sc *p) { return __builtin_vec_lvswx (a,p); }
> ++vuc ls17(long a, vuc *p) { return __builtin_vec_lvswx (a,p); }
> ++vuc ls18(long a, uc *p) { return __builtin_vec_lvswx (a,p); }
> ++vsf Dls01(long a, vsf *p) { return vec_lvswx (a,p); }
> ++vsf Dls02(long a, sf *p) { return vec_lvswx (a,p); }
> ++vbi Dls03(long a, vbi *p) { return vec_lvswx (a,p); }
> ++vsi Dls04(long a, vsi *p) { return vec_lvswx (a,p); }
> ++vsi Dls05(long a, si *p) { return vec_lvswx (a,p); }
> ++vui Dls06(long a, vui *p) { return vec_lvswx (a,p); }
> ++vui Dls07(long a, ui *p) { return vec_lvswx (a,p); }
> ++vbs Dls08(long a, vbs *p) { return vec_lvswx (a,p); }
> ++vp Dls09(long a, vp *p) { return vec_lvswx (a,p); }
> ++vss Dls10(long a, vss *p) { return vec_lvswx (a,p); }
> ++vss Dls11(long a, ss *p) { return vec_lvswx (a,p); }
> ++vus Dls12(long a, vus *p) { return vec_lvswx (a,p); }
> ++vus Dls13(long a, us *p) { return vec_lvswx (a,p); }
> ++vbc Dls14(long a, vbc *p) { return vec_lvswx (a,p); }
> ++vsc Dls15(long a, vsc *p) { return vec_lvswx (a,p); }
> ++vsc Dls16(long a, sc *p) { return vec_lvswx (a,p); }
> ++vuc Dls17(long a, vuc *p) { return vec_lvswx (a,p); }
> ++vuc Dls18(long a, uc *p) { return vec_lvswx (a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-19.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-19.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-19.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-19.c 2012-03-06 12:31:05.155039001 -0600
> +@@ -0,0 +1,66 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "lvswxl" 37 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vsc ls2l(long a, void *p) { return __builtin_altivec_lvswxl (a,p); }
> ++vsf lsl01(long a, vsf *p) { return __builtin_vec_lvswxl (a,p); }
> ++vsf lsl02(long a, sf *p) { return __builtin_vec_lvswxl (a,p); }
> ++vbi lsl03(long a, vbi *p) { return __builtin_vec_lvswxl (a,p); }
> ++vsi lsl04(long a, vsi *p) { return __builtin_vec_lvswxl (a,p); }
> ++vsi lsl05(long a, si *p) { return __builtin_vec_lvswxl (a,p); }
> ++vui lsl06(long a, vui *p) { return __builtin_vec_lvswxl (a,p); }
> ++vui lsl07(long a, ui *p) { return __builtin_vec_lvswxl (a,p); }
> ++vbs lsl08(long a, vbs *p) { return __builtin_vec_lvswxl (a,p); }
> ++vp lsl09(long a, vp *p) { return __builtin_vec_lvswxl (a,p); }
> ++vss lsl10(long a, vss *p) { return __builtin_vec_lvswxl (a,p); }
> ++vss lsl11(long a, ss *p) { return __builtin_vec_lvswxl (a,p); }
> ++vus lsl12(long a, vus *p) { return __builtin_vec_lvswxl (a,p); }
> ++vus lsl13(long a, us *p) { return __builtin_vec_lvswxl (a,p); }
> ++vbc lsl14(long a, vbc *p) { return __builtin_vec_lvswxl (a,p); }
> ++vsc lsl15(long a, vsc *p) { return __builtin_vec_lvswxl (a,p); }
> ++vsc lsl16(long a, sc *p) { return __builtin_vec_lvswxl (a,p); }
> ++vuc lsl17(long a, vuc *p) { return __builtin_vec_lvswxl (a,p); }
> ++vuc lsl18(long a, uc *p) { return __builtin_vec_lvswxl (a,p); }
> ++vsf Dlsl01(long a, vsf *p) { return vec_lvswxl (a,p); }
> ++vsf Dlsl02(long a, sf *p) { return vec_lvswxl (a,p); }
> ++vbi Dlsl03(long a, vbi *p) { return vec_lvswxl (a,p); }
> ++vsi Dlsl04(long a, vsi *p) { return vec_lvswxl (a,p); }
> ++vsi Dlsl05(long a, si *p) { return vec_lvswxl (a,p); }
> ++vui Dlsl06(long a, vui *p) { return vec_lvswxl (a,p); }
> ++vui Dlsl07(long a, ui *p) { return vec_lvswxl (a,p); }
> ++vbs Dlsl08(long a, vbs *p) { return vec_lvswxl (a,p); }
> ++vp Dlsl09(long a, vp *p) { return vec_lvswxl (a,p); }
> ++vss Dlsl10(long a, vss *p) { return vec_lvswxl (a,p); }
> ++vss Dlsl11(long a, ss *p) { return vec_lvswxl (a,p); }
> ++vus Dlsl12(long a, vus *p) { return vec_lvswxl (a,p); }
> ++vus Dlsl13(long a, us *p) { return vec_lvswxl (a,p); }
> ++vbc Dlsl14(long a, vbc *p) { return vec_lvswxl (a,p); }
> ++vsc Dlsl15(long a, vsc *p) { return vec_lvswxl (a,p); }
> ++vsc Dlsl16(long a, sc *p) { return vec_lvswxl (a,p); }
> ++vuc Dlsl17(long a, vuc *p) { return vec_lvswxl (a,p); }
> ++vuc Dlsl18(long a, uc *p) { return vec_lvswxl (a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-1.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-1.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-1.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-1.c 2012-03-06 12:31:05.156039000 -0600
> +@@ -0,0 +1,36 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "vabsdub" 7 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vuc fa1b(vuc a, vuc b) { return __builtin_altivec_vabsdub (a,b); }
> ++vuc ad1(vuc a, vuc b) { return __builtin_vec_absd (a,b); }
> ++vuc ad2(vbc a, vuc b) { return __builtin_vec_absd (a,b); }
> ++vuc ad3(vuc a, vbc b) { return __builtin_vec_absd (a,b); }
> ++vuc Dad1(vuc a, vuc b) { return vec_absd (a,b); }
> ++vuc Dad2(vbc a, vuc b) { return vec_absd (a,b); }
> ++vuc Dad3(vuc a, vbc b) { return vec_absd (a,b); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-20.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-20.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-20.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-20.c 2012-03-06 12:31:05.156039000 -0600
> +@@ -0,0 +1,66 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "stvswx" 37 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++void ss1(vsc v, long a, vsc *p) { __builtin_altivec_stvswx (v,a,p); }
> ++void ssx01(vsf v, long a, vsf *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx02(vsf v, long a, sf *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx03(vbi v, long a, vbi *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx04(vsi v, long a, vsi *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx05(vsi v, long a, si *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx06(vui v, long a, vui *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx07(vui v, long a, ui *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx08(vbs v, long a, vbs *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx09(vp v, long a, vp *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx10(vss v, long a, vss *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx11(vss v, long a, ss *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx12(vus v, long a, vus *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx13(vus v, long a, us *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx14(vbc v, long a, vbc *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx15(vsc v, long a, vsc *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx16(vsc v, long a, sc *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx17(vuc v, long a, vuc *p) { __builtin_vec_stvswx (v,a,p); }
> ++void ssx18(vuc v, long a, uc *p) { __builtin_vec_stvswx (v,a,p); }
> ++void Dssx01(vsf v, long a, vsf *p) { vec_stvswx (v,a,p); }
> ++void Dssx02(vsf v, long a, sf *p) { vec_stvswx (v,a,p); }
> ++void Dssx03(vbi v, long a, vbi *p) { vec_stvswx (v,a,p); }
> ++void Dssx04(vsi v, long a, vsi *p) { vec_stvswx (v,a,p); }
> ++void Dssx05(vsi v, long a, si *p) { vec_stvswx (v,a,p); }
> ++void Dssx06(vui v, long a, vui *p) { vec_stvswx (v,a,p); }
> ++void Dssx07(vui v, long a, ui *p) { vec_stvswx (v,a,p); }
> ++void Dssx08(vbs v, long a, vbs *p) { vec_stvswx (v,a,p); }
> ++void Dssx09(vp v, long a, vp *p) { vec_stvswx (v,a,p); }
> ++void Dssx10(vss v, long a, vss *p) { vec_stvswx (v,a,p); }
> ++void Dssx11(vss v, long a, ss *p) { vec_stvswx (v,a,p); }
> ++void Dssx12(vus v, long a, vus *p) { vec_stvswx (v,a,p); }
> ++void Dssx13(vus v, long a, us *p) { vec_stvswx (v,a,p); }
> ++void Dssx14(vbc v, long a, vbc *p) { vec_stvswx (v,a,p); }
> ++void Dssx15(vsc v, long a, vsc *p) { vec_stvswx (v,a,p); }
> ++void Dssx16(vsc v, long a, sc *p) { vec_stvswx (v,a,p); }
> ++void Dssx17(vuc v, long a, vuc *p) { vec_stvswx (v,a,p); }
> ++void Dssx18(vuc v, long a, uc *p) { vec_stvswx (v,a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-21.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-21.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-21.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-21.c 2012-03-06 12:31:05.156039000 -0600
> +@@ -0,0 +1,66 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "stvswxl" 37 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++void ss2l(vsc v, long a, vsc *p) { __builtin_altivec_stvswxl (v,a,p); }
> ++void ssxl01(vsf v, long a, vsf *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl02(vsf v, long a, sf *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl03(vbi v, long a, vbi *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl04(vsi v, long a, vsi *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl05(vsi v, long a, si *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl06(vui v, long a, vui *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl07(vui v, long a, ui *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl08(vbs v, long a, vbs *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl09(vp v, long a, vp *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl10(vss v, long a, vss *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl11(vss v, long a, ss *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl12(vus v, long a, vus *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl13(vus v, long a, us *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl14(vbc v, long a, vbc *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl15(vsc v, long a, vsc *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl16(vsc v, long a, sc *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl17(vuc v, long a, vuc *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void ssxl18(vuc v, long a, uc *p) { __builtin_vec_stvswxl (v,a,p); }
> ++void Dssxl01(vsf v, long a, vsf *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl02(vsf v, long a, sf *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl03(vbi v, long a, vbi *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl04(vsi v, long a, vsi *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl05(vsi v, long a, si *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl06(vui v, long a, vui *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl07(vui v, long a, ui *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl08(vbs v, long a, vbs *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl09(vp v, long a, vp *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl10(vss v, long a, vss *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl11(vss v, long a, ss *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl12(vus v, long a, vus *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl13(vus v, long a, us *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl14(vbc v, long a, vbc *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl15(vsc v, long a, vsc *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl16(vsc v, long a, sc *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl17(vuc v, long a, vuc *p) { vec_stvswxl (v,a,p); }
> ++void Dssxl18(vuc v, long a, uc *p) { vec_stvswxl (v,a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-22.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-22.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-22.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-22.c 2012-03-06 12:31:05.157039001 -0600
> +@@ -0,0 +1,66 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "lvsm" 37 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vsc lsm(long a, void *p) { return __builtin_altivec_lvsm (a,p); }
> ++vsf lm01(long a, vsf *p) { return __builtin_vec_lvsm (a,p); }
> ++vsf lm02(long a, sf *p) { return __builtin_vec_lvsm (a,p); }
> ++vbi lm03(long a, vbi *p) { return __builtin_vec_lvsm (a,p); }
> ++vsi lm04(long a, vsi *p) { return __builtin_vec_lvsm (a,p); }
> ++vsi lm05(long a, si *p) { return __builtin_vec_lvsm (a,p); }
> ++vui lm06(long a, vui *p) { return __builtin_vec_lvsm (a,p); }
> ++vui lm07(long a, ui *p) { return __builtin_vec_lvsm (a,p); }
> ++vbs lm08(long a, vbs *p) { return __builtin_vec_lvsm (a,p); }
> ++vp lm09(long a, vp *p) { return __builtin_vec_lvsm (a,p); }
> ++vss lm10(long a, vss *p) { return __builtin_vec_lvsm (a,p); }
> ++vss lm11(long a, ss *p) { return __builtin_vec_lvsm (a,p); }
> ++vus lm12(long a, vus *p) { return __builtin_vec_lvsm (a,p); }
> ++vus lm13(long a, us *p) { return __builtin_vec_lvsm (a,p); }
> ++vbc lm14(long a, vbc *p) { return __builtin_vec_lvsm (a,p); }
> ++vsc lm15(long a, vsc *p) { return __builtin_vec_lvsm (a,p); }
> ++vsc lm16(long a, sc *p) { return __builtin_vec_lvsm (a,p); }
> ++vuc lm17(long a, vuc *p) { return __builtin_vec_lvsm (a,p); }
> ++vuc lm18(long a, uc *p) { return __builtin_vec_lvsm (a,p); }
> ++vsf Dlm01(long a, vsf *p) { return vec_lvsm (a,p); }
> ++vsf Dlm02(long a, sf *p) { return vec_lvsm (a,p); }
> ++vbi Dlm03(long a, vbi *p) { return vec_lvsm (a,p); }
> ++vsi Dlm04(long a, vsi *p) { return vec_lvsm (a,p); }
> ++vsi Dlm05(long a, si *p) { return vec_lvsm (a,p); }
> ++vui Dlm06(long a, vui *p) { return vec_lvsm (a,p); }
> ++vui Dlm07(long a, ui *p) { return vec_lvsm (a,p); }
> ++vbs Dlm08(long a, vbs *p) { return vec_lvsm (a,p); }
> ++vp Dlm09(long a, vp *p) { return vec_lvsm (a,p); }
> ++vss Dlm10(long a, vss *p) { return vec_lvsm (a,p); }
> ++vss Dlm11(long a, ss *p) { return vec_lvsm (a,p); }
> ++vus Dlm12(long a, vus *p) { return vec_lvsm (a,p); }
> ++vus Dlm13(long a, us *p) { return vec_lvsm (a,p); }
> ++vbc Dlm14(long a, vbc *p) { return vec_lvsm (a,p); }
> ++vsc Dlm15(long a, vsc *p) { return vec_lvsm (a,p); }
> ++vsc Dlm16(long a, sc *p) { return vec_lvsm (a,p); }
> ++vuc Dlm17(long a, vuc *p) { return vec_lvsm (a,p); }
> ++vuc Dlm18(long a, uc *p) { return vec_lvsm (a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-2.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-2.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-2.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-2.c 2012-03-06 12:31:05.157039001 -0600
> +@@ -0,0 +1,36 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "vabsduh" 7 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vus fa2h(vus a, vus b) { return __builtin_altivec_vabsduh (a,b); }
> ++vus ad4(vus a, vus b) { return __builtin_vec_absd (a,b); }
> ++vus ad5(vbs a, vus b) { return __builtin_vec_absd (a,b); }
> ++vus ad6(vus a, vbs b) { return __builtin_vec_absd (a,b); }
> ++vus Dad4(vus a, vus b) { return vec_absd (a,b); }
> ++vus Dad5(vbs a, vus b) { return vec_absd (a,b); }
> ++vus Dad6(vus a, vbs b) { return vec_absd (a,b); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-3.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-3.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-3.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-3.c 2012-03-06 12:31:05.157039001 -0600
> +@@ -0,0 +1,36 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "vabsduw" 7 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vui fa3w(vui a, vui b) { return __builtin_altivec_vabsduw (a,b); }
> ++vui ad7(vui a, vui b) { return __builtin_vec_absd (a,b); }
> ++vui ad8(vbi a, vui b) { return __builtin_vec_absd (a,b); }
> ++vui ad9(vui a, vbi b) { return __builtin_vec_absd (a,b); }
> ++vui Dad7(vui a, vui b) { return vec_absd (a,b); }
> ++vui Dad8(vbi a, vui b) { return vec_absd (a,b); }
> ++vui Dad9(vui a, vbi b) { return vec_absd (a,b); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-4.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-4.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-4.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-4.c 2012-03-06 12:31:05.158039002 -0600
> +@@ -0,0 +1,34 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "lvexbx" 5 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vsc le1b(long a, void *p) { return __builtin_altivec_lvexbx (a,p); }
> ++vsc leb1(long a, sc *p) { return __builtin_vec_lvexbx (a,p); }
> ++vuc leb2(long a, uc *p) { return __builtin_vec_lvexbx (a,p); }
> ++vsc Dleb1(long a, sc *p) { return vec_lvexbx (a,p); }
> ++vuc Dleb2(long a, uc *p) { return vec_lvexbx (a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-5.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-5.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-5.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-5.c 2012-03-06 12:31:05.158039002 -0600
> +@@ -0,0 +1,34 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "lvexhx" 5 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vss le2h(long a, void *p) { return __builtin_altivec_lvexhx (a,p); }
> ++vss leh1(long a, ss *p) { return __builtin_vec_lvexhx (a,p); }
> ++vus leh2(long a, us *p) { return __builtin_vec_lvexhx (a,p); }
> ++vss Dleh1(long a, ss *p) { return vec_lvexhx (a,p); }
> ++vus Dleh2(long a, us *p) { return vec_lvexhx (a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-6.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-6.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-6.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-6.c 2012-03-06 12:31:05.158039002 -0600
> +@@ -0,0 +1,40 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "lvexwx" 11 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vsi le3w(long a, void *p) { return __builtin_altivec_lvexwx (a,p); }
> ++vsf lew1(long a, sf *p) { return __builtin_vec_lvexwx (a,p); }
> ++vsi lew2(long a, si *p) { return __builtin_vec_lvexwx (a,p); }
> ++vui lew3(long a, ui *p) { return __builtin_vec_lvexwx (a,p); }
> ++vsi lew4(long a, sl *p) { return __builtin_vec_lvexwx (a,p); }
> ++vui lew5(long a, ul *p) { return __builtin_vec_lvexwx (a,p); }
> ++vsf Dlew1(long a, sf *p) { return vec_lvexwx (a,p); }
> ++vsi Dlew2(long a, si *p) { return vec_lvexwx (a,p); }
> ++vui Dlew3(long a, ui *p) { return vec_lvexwx (a,p); }
> ++vsi Dlew4(long a, sl *p) { return vec_lvexwx (a,p); }
> ++vui Dlew5(long a, ul *p) { return vec_lvexwx (a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-7.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-7.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-7.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-7.c 2012-03-06 12:31:05.159039002 -0600
> +@@ -0,0 +1,42 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "stvexbx" 13 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++void se1b(vsc v, long a, vsc *p) { __builtin_altivec_stvexbx (v,a,p); }
> ++void seb1(vsc v, long a, sc *p) { __builtin_vec_stvexbx (v,a,p); }
> ++void seb2(vuc v, long a, uc *p) { __builtin_vec_stvexbx (v,a,p); }
> ++void seb3(vbc v, long a, sc *p) { __builtin_vec_stvexbx (v,a,p); }
> ++void seb4(vbc v, long a, uc *p) { __builtin_vec_stvexbx (v,a,p); }
> ++void seb5(vsc v, long a, void *p) { __builtin_vec_stvexbx (v,a,p); }
> ++void seb6(vuc v, long a, void *p) { __builtin_vec_stvexbx (v,a,p); }
> ++void Dseb1(vsc v, long a, sc *p) { vec_stvexbx (v,a,p); }
> ++void Dseb2(vuc v, long a, uc *p) { vec_stvexbx (v,a,p); }
> ++void Dseb3(vbc v, long a, sc *p) { vec_stvexbx (v,a,p); }
> ++void Dseb4(vbc v, long a, uc *p) { vec_stvexbx (v,a,p); }
> ++void Dseb5(vsc v, long a, void *p) { vec_stvexbx (v,a,p); }
> ++void Dseb6(vuc v, long a, void *p) { vec_stvexbx (v,a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-8.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-8.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-8.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-8.c 2012-03-06 12:31:05.159039002 -0600
> +@@ -0,0 +1,42 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "stvexhx" 13 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++void se2h(vss v, long a, vss *p) { __builtin_altivec_stvexhx (v,a,p); }
> ++void seh1(vss v, long a, ss *p) { __builtin_vec_stvexhx (v,a,p); }
> ++void seh2(vus v, long a, us *p) { __builtin_vec_stvexhx (v,a,p); }
> ++void seh3(vbs v, long a, ss *p) { __builtin_vec_stvexhx (v,a,p); }
> ++void seh4(vbs v, long a, us *p) { __builtin_vec_stvexhx (v,a,p); }
> ++void seh5(vss v, long a, void *p) { __builtin_vec_stvexhx (v,a,p); }
> ++void seh6(vus v, long a, void *p) { __builtin_vec_stvexhx (v,a,p); }
> ++void Dseh1(vss v, long a, ss *p) { vec_stvexhx (v,a,p); }
> ++void Dseh2(vus v, long a, us *p) { vec_stvexhx (v,a,p); }
> ++void Dseh3(vbs v, long a, ss *p) { vec_stvexhx (v,a,p); }
> ++void Dseh4(vbs v, long a, us *p) { vec_stvexhx (v,a,p); }
> ++void Dseh5(vss v, long a, void *p) { vec_stvexhx (v,a,p); }
> ++void Dseh6(vus v, long a, void *p) { vec_stvexhx (v,a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-9.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-9.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-9.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/altivec2_builtin-9.c 2012-03-06 12:31:05.159039002 -0600
> +@@ -0,0 +1,46 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -maltivec2" } */
> ++/* { dg-final { scan-assembler-times "stvexwx" 17 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++void se3w(vsi v, long a, vsi *p) { __builtin_altivec_stvexwx (v,a,p); }
> ++void sew1(vsf v, long a, sf *p) { __builtin_vec_stvexwx (v,a,p); }
> ++void sew2(vsi v, long a, si *p) { __builtin_vec_stvexwx (v,a,p); }
> ++void sew3(vui v, long a, ui *p) { __builtin_vec_stvexwx (v,a,p); }
> ++void sew4(vbi v, long a, si *p) { __builtin_vec_stvexwx (v,a,p); }
> ++void sew5(vbi v, long a, ui *p) { __builtin_vec_stvexwx (v,a,p); }
> ++void sew6(vsf v, long a, void *p) { __builtin_vec_stvexwx (v,a,p); }
> ++void sew7(vsi v, long a, void *p) { __builtin_vec_stvexwx (v,a,p); }
> ++void sew8(vui v, long a, void *p) { __builtin_vec_stvexwx (v,a,p); }
> ++void Dsew1(vsf v, long a, sf *p) { vec_stvexwx (v,a,p); }
> ++void Dsew2(vsi v, long a, si *p) { vec_stvexwx (v,a,p); }
> ++void Dsew3(vui v, long a, ui *p) { vec_stvexwx (v,a,p); }
> ++void Dsew4(vbi v, long a, si *p) { vec_stvexwx (v,a,p); }
> ++void Dsew5(vbi v, long a, ui *p) { vec_stvexwx (v,a,p); }
> ++void Dsew6(vsf v, long a, void *p) { vec_stvexwx (v,a,p); }
> ++void Dsew7(vsi v, long a, void *p) { vec_stvexwx (v,a,p); }
> ++void Dsew8(vui v, long a, void *p) { vec_stvexwx (v,a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-1.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-1.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-1.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-1.c 2012-03-06 12:31:15.921038995 -0600
> +@@ -0,0 +1,48 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -mcpu=cell" } */
> ++/* { dg-final { scan-assembler-times "lvlx" 19 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vsc lc1(long a, void *p) { return __builtin_altivec_lvlx (a,p); }
> ++vsf llx01(long a, vsf *p) { return __builtin_vec_lvlx (a,p); }
> ++vsf llx02(long a, sf *p) { return __builtin_vec_lvlx (a,p); }
> ++vbi llx03(long a, vbi *p) { return __builtin_vec_lvlx (a,p); }
> ++vsi llx04(long a, vsi *p) { return __builtin_vec_lvlx (a,p); }
> ++vsi llx05(long a, si *p) { return __builtin_vec_lvlx (a,p); }
> ++vui llx06(long a, vui *p) { return __builtin_vec_lvlx (a,p); }
> ++vui llx07(long a, ui *p) { return __builtin_vec_lvlx (a,p); }
> ++vbs llx08(long a, vbs *p) { return __builtin_vec_lvlx (a,p); }
> ++vp llx09(long a, vp *p) { return __builtin_vec_lvlx (a,p); }
> ++vss llx10(long a, vss *p) { return __builtin_vec_lvlx (a,p); }
> ++vss llx11(long a, ss *p) { return __builtin_vec_lvlx (a,p); }
> ++vus llx12(long a, vus *p) { return __builtin_vec_lvlx (a,p); }
> ++vus llx13(long a, us *p) { return __builtin_vec_lvlx (a,p); }
> ++vbc llx14(long a, vbc *p) { return __builtin_vec_lvlx (a,p); }
> ++vsc llx15(long a, vsc *p) { return __builtin_vec_lvlx (a,p); }
> ++vsc llx16(long a, sc *p) { return __builtin_vec_lvlx (a,p); }
> ++vuc llx17(long a, vuc *p) { return __builtin_vec_lvlx (a,p); }
> ++vuc llx18(long a, uc *p) { return __builtin_vec_lvlx (a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-2.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-2.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-2.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-2.c 2012-03-06 12:31:15.921038995 -0600
> +@@ -0,0 +1,48 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -mcpu=cell" } */
> ++/* { dg-final { scan-assembler-times "lvlxl" 19 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vsc lc2(long a, void *p) { return __builtin_altivec_lvlxl (a,p); }
> ++vsf llxl01(long a, vsf *p) { return __builtin_vec_lvlxl (a,p); }
> ++vsf llxl02(long a, sf *p) { return __builtin_vec_lvlxl (a,p); }
> ++vbi llxl03(long a, vbi *p) { return __builtin_vec_lvlxl (a,p); }
> ++vsi llxl04(long a, vsi *p) { return __builtin_vec_lvlxl (a,p); }
> ++vsi llxl05(long a, si *p) { return __builtin_vec_lvlxl (a,p); }
> ++vui llxl06(long a, vui *p) { return __builtin_vec_lvlxl (a,p); }
> ++vui llxl07(long a, ui *p) { return __builtin_vec_lvlxl (a,p); }
> ++vbs llxl08(long a, vbs *p) { return __builtin_vec_lvlxl (a,p); }
> ++vp llxl09(long a, vp *p) { return __builtin_vec_lvlxl (a,p); }
> ++vss llxl10(long a, vss *p) { return __builtin_vec_lvlxl (a,p); }
> ++vss llxl11(long a, ss *p) { return __builtin_vec_lvlxl (a,p); }
> ++vus llxl12(long a, vus *p) { return __builtin_vec_lvlxl (a,p); }
> ++vus llxl13(long a, us *p) { return __builtin_vec_lvlxl (a,p); }
> ++vbc llxl14(long a, vbc *p) { return __builtin_vec_lvlxl (a,p); }
> ++vsc llxl15(long a, vsc *p) { return __builtin_vec_lvlxl (a,p); }
> ++vsc llxl16(long a, sc *p) { return __builtin_vec_lvlxl (a,p); }
> ++vuc llxl17(long a, vuc *p) { return __builtin_vec_lvlxl (a,p); }
> ++vuc llxl18(long a, uc *p) { return __builtin_vec_lvlxl (a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-3.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-3.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-3.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-3.c 2012-03-06 12:31:15.922038996 -0600
> +@@ -0,0 +1,48 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -mcpu=cell" } */
> ++/* { dg-final { scan-assembler-times "lvrx" 19 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vsc lc3(long a, void *p) { return __builtin_altivec_lvrx (a,p); }
> ++vsf lrx01(long a, vsf *p) { return __builtin_vec_lvrx (a,p); }
> ++vsf lrx02(long a, sf *p) { return __builtin_vec_lvrx (a,p); }
> ++vbi lrx03(long a, vbi *p) { return __builtin_vec_lvrx (a,p); }
> ++vsi lrx04(long a, vsi *p) { return __builtin_vec_lvrx (a,p); }
> ++vsi lrx05(long a, si *p) { return __builtin_vec_lvrx (a,p); }
> ++vui lrx06(long a, vui *p) { return __builtin_vec_lvrx (a,p); }
> ++vui lrx07(long a, ui *p) { return __builtin_vec_lvrx (a,p); }
> ++vbs lrx08(long a, vbs *p) { return __builtin_vec_lvrx (a,p); }
> ++vp lrx09(long a, vp *p) { return __builtin_vec_lvrx (a,p); }
> ++vss lrx10(long a, vss *p) { return __builtin_vec_lvrx (a,p); }
> ++vss lrx11(long a, ss *p) { return __builtin_vec_lvrx (a,p); }
> ++vus lrx12(long a, vus *p) { return __builtin_vec_lvrx (a,p); }
> ++vus lrx13(long a, us *p) { return __builtin_vec_lvrx (a,p); }
> ++vbc lrx14(long a, vbc *p) { return __builtin_vec_lvrx (a,p); }
> ++vsc lrx15(long a, vsc *p) { return __builtin_vec_lvrx (a,p); }
> ++vsc lrx16(long a, sc *p) { return __builtin_vec_lvrx (a,p); }
> ++vuc lrx17(long a, vuc *p) { return __builtin_vec_lvrx (a,p); }
> ++vuc lrx18(long a, uc *p) { return __builtin_vec_lvrx (a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-4.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-4.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-4.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-4.c 2012-03-06 12:31:15.922038996 -0600
> +@@ -0,0 +1,48 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -mcpu=cell" } */
> ++/* { dg-final { scan-assembler-times "lvrxl" 19 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++vsc lc4(long a, void *p) { return __builtin_altivec_lvrxl (a,p); }
> ++vsf lrxl01(long a, vsf *p) { return __builtin_vec_lvrxl (a,p); }
> ++vsf lrxl02(long a, sf *p) { return __builtin_vec_lvrxl (a,p); }
> ++vbi lrxl03(long a, vbi *p) { return __builtin_vec_lvrxl (a,p); }
> ++vsi lrxl04(long a, vsi *p) { return __builtin_vec_lvrxl (a,p); }
> ++vsi lrxl05(long a, si *p) { return __builtin_vec_lvrxl (a,p); }
> ++vui lrxl06(long a, vui *p) { return __builtin_vec_lvrxl (a,p); }
> ++vui lrxl07(long a, ui *p) { return __builtin_vec_lvrxl (a,p); }
> ++vbs lrxl08(long a, vbs *p) { return __builtin_vec_lvrxl (a,p); }
> ++vp lrxl09(long a, vp *p) { return __builtin_vec_lvrxl (a,p); }
> ++vss lrxl10(long a, vss *p) { return __builtin_vec_lvrxl (a,p); }
> ++vss lrxl11(long a, ss *p) { return __builtin_vec_lvrxl (a,p); }
> ++vus lrxl12(long a, vus *p) { return __builtin_vec_lvrxl (a,p); }
> ++vus lrxl13(long a, us *p) { return __builtin_vec_lvrxl (a,p); }
> ++vbc lrxl14(long a, vbc *p) { return __builtin_vec_lvrxl (a,p); }
> ++vsc lrxl15(long a, vsc *p) { return __builtin_vec_lvrxl (a,p); }
> ++vsc lrxl16(long a, sc *p) { return __builtin_vec_lvrxl (a,p); }
> ++vuc lrxl17(long a, vuc *p) { return __builtin_vec_lvrxl (a,p); }
> ++vuc lrxl18(long a, uc *p) { return __builtin_vec_lvrxl (a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-5.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-5.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-5.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-5.c 2012-03-06 12:31:15.922038996 -0600
> +@@ -0,0 +1,48 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -mcpu=cell" } */
> ++/* { dg-final { scan-assembler-times "stvlx" 19 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++void sc1(vsc v, long a, void *p) { __builtin_altivec_stvlx (v,a,p); }
> ++void slx01(vsf v, long a, vsf *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx02(vsf v, long a, sf *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx03(vbi v, long a, vbi *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx04(vsi v, long a, vsi *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx05(vsi v, long a, si *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx06(vui v, long a, vui *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx07(vui v, long a, ui *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx08(vbs v, long a, vbs *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx09(vp v, long a, vp *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx10(vss v, long a, vss *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx11(vss v, long a, ss *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx12(vus v, long a, vus *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx13(vus v, long a, us *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx14(vbc v, long a, vbc *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx15(vsc v, long a, vsc *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx16(vsc v, long a, sc *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx17(vuc v, long a, vuc *p) { __builtin_vec_stvlx (v,a,p); }
> ++void slx18(vuc v, long a, uc *p) { __builtin_vec_stvlx (v,a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-6.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-6.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-6.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-6.c 2012-03-06 12:31:15.923039000 -0600
> +@@ -0,0 +1,48 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -mcpu=cell" } */
> ++/* { dg-final { scan-assembler-times "stvlxl" 19 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++void sc2(vsc v, long a, void *p) { __builtin_altivec_stvlxl (v,a,p); }
> ++void slxl01(vsf v, long a, vsf *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl02(vsf v, long a, sf *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl03(vbi v, long a, vbi *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl04(vsi v, long a, vsi *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl05(vsi v, long a, si *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl06(vui v, long a, vui *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl07(vui v, long a, ui *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl08(vbs v, long a, vbs *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl09(vp v, long a, vp *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl10(vss v, long a, vss *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl11(vss v, long a, ss *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl12(vus v, long a, vus *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl13(vus v, long a, us *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl14(vbc v, long a, vbc *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl15(vsc v, long a, vsc *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl16(vsc v, long a, sc *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl17(vuc v, long a, vuc *p) { __builtin_vec_stvlxl (v,a,p); }
> ++void slxl18(vuc v, long a, uc *p) { __builtin_vec_stvlxl (v,a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-7.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-7.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-7.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-7.c 2012-03-06 12:31:15.923039000 -0600
> +@@ -0,0 +1,48 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -mcpu=cell" } */
> ++/* { dg-final { scan-assembler-times "stvrx" 19 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++void sc3(vsc v, long a, void *p) { __builtin_altivec_stvrx (v,a,p); }
> ++void srx01(vsf v, long a, vsf *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx02(vsf v, long a, sf *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx03(vbi v, long a, vbi *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx04(vsi v, long a, vsi *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx05(vsi v, long a, si *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx06(vui v, long a, vui *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx07(vui v, long a, ui *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx08(vbs v, long a, vbs *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx09(vp v, long a, vp *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx10(vss v, long a, vss *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx11(vss v, long a, ss *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx12(vus v, long a, vus *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx13(vus v, long a, us *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx14(vbc v, long a, vbc *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx15(vsc v, long a, vsc *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx16(vsc v, long a, sc *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx17(vuc v, long a, vuc *p) { __builtin_vec_stvrx (v,a,p); }
> ++void srx18(vuc v, long a, uc *p) { __builtin_vec_stvrx (v,a,p); }
> +diff -ruN gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-8.c gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-8.c
> +--- gcc-4.6.2-orig/gcc/testsuite/gcc.target/powerpc/cell_builtin-8.c 1969-12-31 18:00:00.000000000 -0600
> ++++ gcc-4.6.2/gcc/testsuite/gcc.target/powerpc/cell_builtin-8.c 2012-03-06 12:31:15.923039000 -0600
> +@@ -0,0 +1,48 @@
> ++/* { dg-do compile { target { powerpc*-*-* } } } */
> ++/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
> ++/* { dg-require-effective-target powerpc_altivec_ok } */
> ++/* { dg-options "-O2 -maltivec -mcpu=cell" } */
> ++/* { dg-final { scan-assembler-times "stvrxl" 19 } } */
> ++
> ++#include <altivec.h>
> ++
> ++typedef __vector signed char vsc;
> ++typedef __vector signed short vss;
> ++typedef __vector signed int vsi;
> ++typedef __vector unsigned char vuc;
> ++typedef __vector unsigned short vus;
> ++typedef __vector unsigned int vui;
> ++typedef __vector bool char vbc;
> ++typedef __vector bool short vbs;
> ++typedef __vector bool int vbi;
> ++typedef __vector float vsf;
> ++typedef __vector pixel vp;
> ++typedef signed char sc;
> ++typedef signed short ss;
> ++typedef signed int si;
> ++typedef signed long sl;
> ++typedef unsigned char uc;
> ++typedef unsigned short us;
> ++typedef unsigned int ui;
> ++typedef unsigned long ul;
> ++typedef float sf;
> ++
> ++void sc4(vsc v, long a, void *p) { __builtin_altivec_stvrxl (v,a,p); }
> ++void srxl01(vsf v, long a, vsf *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl02(vsf v, long a, sf *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl03(vbi v, long a, vbi *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl04(vsi v, long a, vsi *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl05(vsi v, long a, si *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl06(vui v, long a, vui *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl07(vui v, long a, ui *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl08(vbs v, long a, vbs *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl09(vp v, long a, vp *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl10(vss v, long a, vss *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl11(vss v, long a, ss *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl12(vus v, long a, vus *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl13(vus v, long a, us *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl14(vbc v, long a, vbc *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl15(vsc v, long a, vsc *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl16(vsc v, long a, sc *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl17(vuc v, long a, vuc *p) { __builtin_vec_stvrxl (v,a,p); }
> ++void srxl18(vuc v, long a, uc *p) { __builtin_vec_stvrxl (v,a,p); }
> diff --git a/meta/recipes-devtools/gcc/gcc-4.6/gcc.no_power_builtins.patch b/meta/recipes-devtools/gcc/gcc-4.6/gcc.no_power_builtins.patch
> new file mode 100644
> index 0000000..3f69f29
> --- /dev/null
> +++ b/meta/recipes-devtools/gcc/gcc-4.6/gcc.no_power_builtins.patch
> @@ -0,0 +1,30 @@
> +Upstream-Status: Pending
> +
> +People are working to include this fixes upstream
> +
> +--- gcc-4.6.3/gcc/config/rs6000/rs6000-c.c-orig 2012-07-10 12:16:59.708254001 -0500
> ++++ gcc-4.6.3/gcc/config/rs6000/rs6000-c.c 2012-07-10 12:18:58.625254001 -0500
> +@@ -272,19 +272,19 @@
> + builtin_define ("_ARCH_PPCGR");
> + if (TARGET_POWERPC64)
> + builtin_define ("_ARCH_PPC64");
> +- if (TARGET_MFCRF)
> ++ if (TARGET_MFCRF && rs6000_cpu != PROCESSOR_PPCE6500)
> + builtin_define ("_ARCH_PWR4");
> +- if (TARGET_POPCNTB)
> ++ if (TARGET_POPCNTB && rs6000_cpu != PROCESSOR_PPCE5500 && rs6000_cpu != PROCESSOR_PPCE6500)
> + builtin_define ("_ARCH_PWR5");
> + if (TARGET_FPRND)
> + builtin_define ("_ARCH_PWR5X");
> +- if (TARGET_CMPB)
> ++ if (TARGET_CMPB && rs6000_cpu != PROCESSOR_PPCE5500 && rs6000_cpu != PROCESSOR_PPCE6500)
> + builtin_define ("_ARCH_PWR6");
> + if (TARGET_MFPGPR)
> + builtin_define ("_ARCH_PWR6X");
> + if (! TARGET_POWER && ! TARGET_POWER2 && ! TARGET_POWERPC)
> + builtin_define ("_ARCH_COM");
> +- if (TARGET_POPCNTD)
> ++ if (TARGET_POPCNTD && rs6000_cpu != PROCESSOR_PPCE5500 && rs6000_cpu != PROCESSOR_PPCE6500)
> + builtin_define ("_ARCH_PWR7");
> + if (TARGET_ALTIVEC)
> + {
> diff --git a/meta/recipes-devtools/gcc/gcc-4.6/powerpc-e5500.patch b/meta/recipes-devtools/gcc/gcc-4.6/powerpc-e5500.patch
> deleted file mode 100644
> index 1f478f3..0000000
> --- a/meta/recipes-devtools/gcc/gcc-4.6/powerpc-e5500.patch
> +++ /dev/null
> @@ -1,465 +0,0 @@
> -Upstream-Status: Pending
> -
> -Implements basic e5500 enablement in gcc, with a scheduler, -mcpu
> -flag, etc...
> -
> -Also splits the masks for popcntb, popcntd, and cmpb. Originally those
> -masks would also control other instructions that e5500 does not
> -support (so, we either get none or all).
> -
> -For the lack of means to do tests, those instructions were never
> -enabled until now. The new instructions enabled with this patch are:
> -popcntb, popcntw, popcntd, bpermd, prtyw, prtyd, cmpb, ldbrx, and
> -stdbrx.
> -
> -Signed-off-by: Edmar Wienskoski <edmar at freescale.com>
> -Signed-off-by: Kumar Gala <galak at kernel.crashing.org>
> -
> -Index: gcc-4_6-branch/gcc/config.gcc
> -===================================================================
> ---- gcc-4_6-branch.orig/gcc/config.gcc
> -+++ gcc-4_6-branch/gcc/config.gcc
> -@@ -395,7 +395,7 @@ powerpc*-*-*)
> - extra_headers="ppc-asm.h altivec.h spe.h ppu_intrinsics.h paired.h spu2vmx.h vec_types.h si2vmx.h"
> - need_64bit_hwint=yes
> - case x$with_cpu in
> -- xpowerpc64|xdefault64|x6[23]0|x970|xG5|xpower[34567]|xpower6x|xrs64a|xcell|xa2|xe500mc64)
> -+ xpowerpc64|xdefault64|x6[23]0|x970|xG5|xpower[34567]|xpower6x|xrs64a|xcell|xa2|xe500mc64|xe5500)
> - cpu_is_64bit=yes
> - ;;
> - esac
> -@@ -3493,7 +3493,7 @@ case "${target}" in
> - | 401 | 403 | 405 | 405fp | 440 | 440fp | 464 | 464fp \
> - | 476 | 476fp | 505 | 601 | 602 | 603 | 603e | ec603e \
> - | 604 | 604e | 620 | 630 | 740 | 750 | 7400 | 7450 \
> -- | a2 | e300c[23] | 854[08] | e500mc | e500mc64 | titan\
> -+ | a2 | e300c[23] | 854[08] | e500mc | e500mc64 | e5500 | titan\
> - | 801 | 821 | 823 | 860 | 970 | G3 | G4 | G5 | cell)
> - # OK
> - ;;
> -Index: gcc-4_6-branch/gcc/config/rs6000/e5500.md
> -===================================================================
> ---- /dev/null
> -+++ gcc-4_6-branch/gcc/config/rs6000/e5500.md
> -@@ -0,0 +1,176 @@
> -+;; Pipeline description for Freescale PowerPC e5500 core.
> -+;; Copyright (C) 2011 Free Software Foundation, Inc.
> -+;; Contributed by Edmar Wienskoski (edmar at freescale.com)
> -+;;
> -+;; This file is part of GCC.
> -+;;
> -+;; GCC is free software; you can redistribute it and/or modify it
> -+;; under the terms of the GNU General Public License as published
> -+;; by the Free Software Foundation; either version 3, or (at your
> -+;; option) any later version.
> -+;;
> -+;; GCC is distributed in the hope that it will be useful, but WITHOUT
> -+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
> -+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
> -+;; License for more details.
> -+;;
> -+;; You should have received a copy of the GNU General Public License
> -+;; along with GCC; see the file COPYING3. If not see
> -+;; <http://www.gnu.org/licenses/>.
> -+;;
> -+;; e5500 64-bit SFX(2), CFX, LSU, FPU, BU
> -+;; Max issue 3 insns/clock cycle (includes 1 branch)
> -+
> -+(define_automaton "e5500_most,e5500_long")
> -+(define_cpu_unit "e5500_decode_0,e5500_decode_1" "e5500_most")
> -+
> -+;; SFX.
> -+(define_cpu_unit "e5500_sfx_0,e5500_sfx_1" "e5500_most")
> -+
> -+;; CFX.
> -+(define_cpu_unit "e5500_cfx_stage0,e5500_cfx_stage1" "e5500_most")
> -+
> -+;; Non-pipelined division.
> -+(define_cpu_unit "e5500_cfx_div" "e5500_long")
> -+
> -+;; LSU.
> -+(define_cpu_unit "e5500_lsu" "e5500_most")
> -+
> -+;; FPU.
> -+(define_cpu_unit "e5500_fpu" "e5500_long")
> -+
> -+;; BU.
> -+(define_cpu_unit "e5500_bu" "e5500_most")
> -+
> -+;; The following units are used to make the automata deterministic.
> -+(define_cpu_unit "present_e5500_decode_0" "e5500_most")
> -+(define_cpu_unit "present_e5500_sfx_0" "e5500_most")
> -+(presence_set "present_e5500_decode_0" "e5500_decode_0")
> -+(presence_set "present_e5500_sfx_0" "e5500_sfx_0")
> -+
> -+;; Some useful abbreviations.
> -+(define_reservation "e5500_decode"
> -+ "e5500_decode_0|e5500_decode_1+present_e5500_decode_0")
> -+(define_reservation "e5500_sfx"
> -+ "e5500_sfx_0|e5500_sfx_1+present_e5500_sfx_0")
> -+
> -+;; SFX.
> -+(define_insn_reservation "e5500_sfx" 1
> -+ (and (eq_attr "type" "integer,insert_word,insert_dword,delayed_compare,\
> -+ shift,cntlz,exts")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_sfx")
> -+
> -+(define_insn_reservation "e5500_sfx2" 2
> -+ (and (eq_attr "type" "cmp,compare,fast_compare,trap")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_sfx")
> -+
> -+(define_insn_reservation "e5500_delayed" 2
> -+ (and (eq_attr "type" "var_shift_rotate,var_delayed_compare,popcnt")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_sfx*2")
> -+
> -+(define_insn_reservation "e5500_two" 2
> -+ (and (eq_attr "type" "two")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_decode+e5500_sfx,e5500_sfx")
> -+
> -+(define_insn_reservation "e5500_three" 3
> -+ (and (eq_attr "type" "three")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,(e5500_decode+e5500_sfx)*2,e5500_sfx")
> -+
> -+;; SFX - Mfcr.
> -+(define_insn_reservation "e5500_mfcr" 4
> -+ (and (eq_attr "type" "mfcr")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_sfx_0*4")
> -+
> -+;; SFX - Mtcrf.
> -+(define_insn_reservation "e5500_mtcrf" 1
> -+ (and (eq_attr "type" "mtcr")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_sfx_0")
> -+
> -+;; SFX - Mtjmpr.
> -+(define_insn_reservation "e5500_mtjmpr" 1
> -+ (and (eq_attr "type" "mtjmpr,mfjmpr")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_sfx")
> -+
> -+;; CFX - Multiply.
> -+(define_insn_reservation "e5500_multiply" 4
> -+ (and (eq_attr "type" "imul")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_cfx_stage0,e5500_cfx_stage1")
> -+
> -+(define_insn_reservation "e5500_multiply_i" 5
> -+ (and (eq_attr "type" "imul2,imul3,imul_compare")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_cfx_stage0,\
> -+ e5500_cfx_stage0+e5500_cfx_stage1,e5500_cfx_stage1")
> -+
> -+;; CFX - Divide.
> -+(define_insn_reservation "e5500_divide" 16
> -+ (and (eq_attr "type" "idiv")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_cfx_stage0+e5500_cfx_div,\
> -+ e5500_cfx_div*15")
> -+
> -+(define_insn_reservation "e5500_divide_d" 26
> -+ (and (eq_attr "type" "ldiv")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_cfx_stage0+e5500_cfx_div,\
> -+ e5500_cfx_div*25")
> -+
> -+;; LSU - Loads.
> -+(define_insn_reservation "e5500_load" 3
> -+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
> -+ load_l,sync")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_lsu")
> -+
> -+(define_insn_reservation "e5500_fpload" 4
> -+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_lsu")
> -+
> -+;; LSU - Stores.
> -+(define_insn_reservation "e5500_store" 3
> -+ (and (eq_attr "type" "store,store_ux,store_u,store_c")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_lsu")
> -+
> -+(define_insn_reservation "e5500_fpstore" 3
> -+ (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_lsu")
> -+
> -+;; FP.
> -+(define_insn_reservation "e5500_float" 7
> -+ (and (eq_attr "type" "fpsimple,fp,fpcompare,dmul")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_fpu")
> -+
> -+(define_insn_reservation "e5500_sdiv" 20
> -+ (and (eq_attr "type" "sdiv")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_fpu*20")
> -+
> -+(define_insn_reservation "e5500_ddiv" 35
> -+ (and (eq_attr "type" "ddiv")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_fpu*35")
> -+
> -+;; BU.
> -+(define_insn_reservation "e5500_branch" 1
> -+ (and (eq_attr "type" "jmpreg,branch,isync")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_bu")
> -+
> -+;; BU - CR logical.
> -+(define_insn_reservation "e5500_cr_logical" 1
> -+ (and (eq_attr "type" "cr_logical,delayed_cr")
> -+ (eq_attr "cpu" "ppce5500"))
> -+ "e5500_decode,e5500_bu")
> -Index: gcc-4_6-branch/gcc/config/rs6000/rs6000-opts.h
> -===================================================================
> ---- gcc-4_6-branch.orig/gcc/config/rs6000/rs6000-opts.h
> -+++ gcc-4_6-branch/gcc/config/rs6000/rs6000-opts.h
> -@@ -53,6 +53,7 @@ enum processor_type
> - PROCESSOR_PPCE300C3,
> - PROCESSOR_PPCE500MC,
> - PROCESSOR_PPCE500MC64,
> -+ PROCESSOR_PPCE5500,
> - PROCESSOR_POWER4,
> - PROCESSOR_POWER5,
> - PROCESSOR_POWER6,
> -Index: gcc-4_6-branch/gcc/config/rs6000/rs6000.c
> -===================================================================
> ---- gcc-4_6-branch.orig/gcc/config/rs6000/rs6000.c
> -+++ gcc-4_6-branch/gcc/config/rs6000/rs6000.c
> -@@ -779,6 +779,25 @@ struct processor_costs ppce500mc64_cost
> - 1, /* prefetch streams /*/
> - };
> -
> -+/* Instruction costs on PPCE5500 processors. */
> -+static const
> -+struct processor_costs ppce5500_cost = {
> -+ COSTS_N_INSNS (5), /* mulsi */
> -+ COSTS_N_INSNS (5), /* mulsi_const */
> -+ COSTS_N_INSNS (5), /* mulsi_const9 */
> -+ COSTS_N_INSNS (5), /* muldi */
> -+ COSTS_N_INSNS (14), /* divsi */
> -+ COSTS_N_INSNS (14), /* divdi */
> -+ COSTS_N_INSNS (7), /* fp */
> -+ COSTS_N_INSNS (10), /* dmul */
> -+ COSTS_N_INSNS (36), /* sdiv */
> -+ COSTS_N_INSNS (66), /* ddiv */
> -+ 64, /* cache line size */
> -+ 32, /* l1 cache */
> -+ 128, /* l2 cache */
> -+ 1, /* prefetch streams /*/
> -+};
> -+
> - /* Instruction costs on AppliedMicro Titan processors. */
> - static const
> - struct processor_costs titan_cost = {
> -@@ -1784,6 +1803,9 @@ static struct rs6000_ptt const processor
> - | MASK_ISEL},
> - {"e500mc64", PROCESSOR_PPCE500MC64, POWERPC_BASE_MASK | MASK_POWERPC64
> - | MASK_PPC_GFXOPT | MASK_ISEL},
> -+ {"e5500", PROCESSOR_PPCE5500, POWERPC_BASE_MASK | MASK_POWERPC64
> -+ | MASK_PPC_GFXOPT | MASK_ISEL | MASK_CMPB | MASK_POPCNTB
> -+ | MASK_POPCNTD},
> - {"860", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
> - {"970", PROCESSOR_POWER4,
> - POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
> -@@ -2741,7 +2763,8 @@ rs6000_option_override_internal (bool gl
> - : PROCESSOR_DEFAULT));
> -
> - if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
> -- || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64)
> -+ || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
> -+ || rs6000_cpu == PROCESSOR_PPCE5500)
> - {
> - if (TARGET_ALTIVEC)
> - error ("AltiVec not supported in this target");
> -@@ -2842,9 +2865,14 @@ rs6000_option_override_internal (bool gl
> - user's opinion, though. */
> - if (rs6000_block_move_inline_limit == 0
> - && (rs6000_cpu == PROCESSOR_PPCE500MC
> -- || rs6000_cpu == PROCESSOR_PPCE500MC64))
> -+ || rs6000_cpu == PROCESSOR_PPCE500MC64
> -+ || rs6000_cpu == PROCESSOR_PPCE5500))
> - rs6000_block_move_inline_limit = 128;
> -
> -+ /* Those machines does not have fsqrt instruction */
> -+ if (rs6000_cpu == PROCESSOR_PPCE5500)
> -+ target_flags &= ~MASK_PPC_GPOPT;
> -+
> - /* store_one_arg depends on expand_block_move to handle at least the
> - size of reg_parm_stack_space. */
> - if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
> -@@ -2976,7 +3004,8 @@ rs6000_option_override_internal (bool gl
> - #endif
> -
> - if (TARGET_E500 || rs6000_cpu == PROCESSOR_PPCE500MC
> -- || rs6000_cpu == PROCESSOR_PPCE500MC64)
> -+ || rs6000_cpu == PROCESSOR_PPCE500MC64
> -+ || rs6000_cpu == PROCESSOR_PPCE5500)
> - {
> - /* The e500 and e500mc do not have string instructions, and we set
> - MASK_STRING above when optimizing for size. */
> -@@ -3023,7 +3052,8 @@ rs6000_option_override_internal (bool gl
> - || rs6000_cpu == PROCESSOR_POWER6
> - || rs6000_cpu == PROCESSOR_POWER7
> - || rs6000_cpu == PROCESSOR_PPCE500MC
> -- || rs6000_cpu == PROCESSOR_PPCE500MC64);
> -+ || rs6000_cpu == PROCESSOR_PPCE500MC64
> -+ || rs6000_cpu == PROCESSOR_PPCE5500);
> -
> - /* Allow debug switches to override the above settings. These are set to -1
> - in rs6000.opt to indicate the user hasn't directly set the switch. */
> -@@ -3245,6 +3275,10 @@ rs6000_option_override_internal (bool gl
> - rs6000_cost = &ppce500mc64_cost;
> - break;
> -
> -+ case PROCESSOR_PPCE5500:
> -+ rs6000_cost = &ppce5500_cost;
> -+ break;
> -+
> - case PROCESSOR_TITAN:
> - rs6000_cost = &titan_cost;
> - break;
> -@@ -23227,6 +23261,7 @@ rs6000_adjust_cost (rtx insn, rtx link,
> - || rs6000_cpu_attr == CPU_PPC750
> - || rs6000_cpu_attr == CPU_PPC7400
> - || rs6000_cpu_attr == CPU_PPC7450
> -+ || rs6000_cpu_attr == CPU_PPCE5500
> - || rs6000_cpu_attr == CPU_POWER4
> - || rs6000_cpu_attr == CPU_POWER5
> - || rs6000_cpu_attr == CPU_POWER7
> -@@ -23771,6 +23806,7 @@ rs6000_issue_rate (void)
> - case CPU_PPCE300C3:
> - case CPU_PPCE500MC:
> - case CPU_PPCE500MC64:
> -+ case CPU_PPCE5500:
> - case CPU_TITAN:
> - return 2;
> - case CPU_RIOS2:
> -Index: gcc-4_6-branch/gcc/config/rs6000/rs6000.h
> -===================================================================
> ---- gcc-4_6-branch.orig/gcc/config/rs6000/rs6000.h
> -+++ gcc-4_6-branch/gcc/config/rs6000/rs6000.h
> -@@ -168,6 +168,7 @@
> - %{mcpu=e300c3: -me300} \
> - %{mcpu=e500mc: -me500mc} \
> - %{mcpu=e500mc64: -me500mc64} \
> -+%{mcpu=e5500: -me5500} \
> - %{maltivec: -maltivec} \
> - %{mvsx: -mvsx %{!maltivec: -maltivec} %{!mcpu*: %(asm_cpu_power7)}} \
> - -many"
> -@@ -477,13 +478,13 @@ extern int rs6000_vector_align[];
> -
> - #define TARGET_FCTIDZ TARGET_FCFID
> - #define TARGET_STFIWX TARGET_PPC_GFXOPT
> --#define TARGET_LFIWAX TARGET_CMPB
> --#define TARGET_LFIWZX TARGET_POPCNTD
> --#define TARGET_FCFIDS TARGET_POPCNTD
> --#define TARGET_FCFIDU TARGET_POPCNTD
> --#define TARGET_FCFIDUS TARGET_POPCNTD
> --#define TARGET_FCTIDUZ TARGET_POPCNTD
> --#define TARGET_FCTIWUZ TARGET_POPCNTD
> -+#define TARGET_LFIWAX (TARGET_CMPB && rs6000_cpu != PROCESSOR_PPCE5500)
> -+#define TARGET_LFIWZX (TARGET_POPCNTD && rs6000_cpu != PROCESSOR_PPCE5500)
> -+#define TARGET_FCFIDS TARGET_LFIWZX
> -+#define TARGET_FCFIDU TARGET_LFIWZX
> -+#define TARGET_FCFIDUS TARGET_LFIWZX
> -+#define TARGET_FCTIDUZ TARGET_LFIWZX
> -+#define TARGET_FCTIWUZ TARGET_LFIWZX
> -
> - /* E500 processors only support plain "sync", not lwsync. */
> - #define TARGET_NO_LWSYNC TARGET_E500
> -@@ -494,10 +495,12 @@ extern int rs6000_vector_align[];
> -
> - #define TARGET_FRE (TARGET_HARD_FLOAT && TARGET_FPRS \
> - && TARGET_DOUBLE_FLOAT \
> -- && (TARGET_POPCNTB || VECTOR_UNIT_VSX_P (DFmode)))
> -+ && (TARGET_POPCNTB || VECTOR_UNIT_VSX_P (DFmode)) \
> -+ && rs6000_cpu != PROCESSOR_PPCE5500)
> -
> - #define TARGET_FRSQRTES (TARGET_HARD_FLOAT && TARGET_POPCNTB \
> -- && TARGET_FPRS && TARGET_SINGLE_FLOAT)
> -+ && TARGET_FPRS && TARGET_SINGLE_FLOAT \
> -+ && rs6000_cpu != PROCESSOR_PPCE5500)
> -
> - #define TARGET_FRSQRTE (TARGET_HARD_FLOAT && TARGET_FPRS \
> - && TARGET_DOUBLE_FLOAT \
> -Index: gcc-4_6-branch/gcc/config/rs6000/rs6000.md
> -===================================================================
> ---- gcc-4_6-branch.orig/gcc/config/rs6000/rs6000.md
> -+++ gcc-4_6-branch/gcc/config/rs6000/rs6000.md
> -@@ -126,7 +126,7 @@
> -
> - ;; Define an insn type attribute. This is used in function unit delay
> - ;; computations.
> --(define_attr "type" "integer,two,three,load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,store,store_ux,store_u,fpload,fpload_ux,fpload_u,fpstore,fpstore_ux,fpstore_u,vecload,vecstore,imul,imul2,imul3,lmul,idiv,ldiv,insert_word,branch,cmp,fast_compare,compare,var_delayed_compare,delayed_compare,imul_compare,lmul_compare,fpcompare,cr_logical,delayed_cr,mfcr,mfcrf,mtcr,mfjmpr,mtjmpr,fp,fpsimple,dmul,sdiv,ddiv,ssqrt,dsqrt,jmpreg,brinc,vecsimple,veccomplex,vecdiv,veccmp,veccmpsimple,vecperm,vecfloat,vecfdiv,isync,sync,load_l,store_c,shift,trap,insert_dword,var_shift_rotate,cntlz,exts,mffgpr,mftgpr,isel"
> -+(define_attr "type" "integer,two,three,load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,store,store_ux,store_u,fpload,fpload_ux,fpload_u,fpstore,fpstore_ux,fpstore_u,vecload,vecstore,imul,imul2,imul3,lmul,idiv,ldiv,insert_word,branch,cmp,fast_compare,compare,var_delayed_compare,delayed_compare,imul_compare,lmul_compare,fpcompare,cr_logical,delayed_cr,mfcr,mfcrf,mtcr,mfjmpr,mtjmpr,fp,fpsimple,dmul,sdiv,ddiv,ssqrt,dsqrt,jmpreg,brinc,vecsimple,veccomplex,vecdiv,veccmp,veccmpsimple,vecperm,vecfloat,vecfdiv,isync,sync,load_l,store_c,shift,trap,insert_dword,var_shift_rotate,cntlz,exts,mffgpr,mftgpr,isel,popcnt"
> - (const_string "integer"))
> -
> - ;; Define floating point instruction sub-types for use with Xfpu.md
> -@@ -148,7 +148,7 @@
> - ;; Processor type -- this attribute must exactly match the processor_type
> - ;; enumeration in rs6000.h.
> -
> --(define_attr "cpu" "rios1,rios2,rs64a,mpccore,ppc403,ppc405,ppc440,ppc476,ppc601,ppc603,ppc604,ppc604e,ppc620,ppc630,ppc750,ppc7400,ppc7450,ppc8540,ppce300c2,ppce300c3,ppce500mc,ppce500mc64,power4,power5,power6,power7,cell,ppca2,titan"
> -+(define_attr "cpu" "rios1,rios2,rs64a,mpccore,ppc403,ppc405,ppc440,ppc476,ppc601,ppc603,ppc604,ppc604e,ppc620,ppc630,ppc750,ppc7400,ppc7450,ppc8540,ppce300c2,ppce300c3,ppce500mc,ppce500mc64,ppce5500,power4,power5,power6,power7,cell,ppca2,titan"
> - (const (symbol_ref "rs6000_cpu_attr")))
> -
> -
> -@@ -176,6 +176,7 @@
> - (include "e300c2c3.md")
> - (include "e500mc.md")
> - (include "e500mc64.md")
> -+(include "e5500.md")
> - (include "power4.md")
> - (include "power5.md")
> - (include "power6.md")
> -@@ -2302,13 +2303,17 @@
> - (unspec:GPR [(match_operand:GPR 1 "gpc_reg_operand" "r")]
> - UNSPEC_POPCNTB))]
> - "TARGET_POPCNTB"
> -- "popcntb %0,%1")
> -+ "popcntb %0,%1"
> -+ [(set_attr "length" "4")
> -+ (set_attr "type" "popcnt")])
> -
> - (define_insn "popcntd<mode>2"
> - [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
> - (popcount:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
> - "TARGET_POPCNTD"
> -- "popcnt<wd> %0,%1")
> -+ "popcnt<wd> %0,%1"
> -+ [(set_attr "length" "4")
> -+ (set_attr "type" "popcnt")])
> -
> - (define_expand "popcount<mode>2"
> - [(set (match_operand:GPR 0 "gpc_reg_operand" "")
> -@@ -5957,10 +5962,10 @@
> - && ((TARGET_PPC_GFXOPT
> - && !HONOR_NANS (<MODE>mode)
> - && !HONOR_SIGNED_ZEROS (<MODE>mode))
> -- || TARGET_CMPB
> -+ || TARGET_LFIWAX
> - || VECTOR_UNIT_VSX_P (<MODE>mode))"
> - {
> -- if (TARGET_CMPB || VECTOR_UNIT_VSX_P (<MODE>mode))
> -+ if (TARGET_LFIWAX || VECTOR_UNIT_VSX_P (<MODE>mode))
> - {
> - emit_insn (gen_copysign<mode>3_fcpsgn (operands[0], operands[1],
> - operands[2]));
> -@@ -5979,7 +5984,7 @@
> - (unspec:SFDF [(match_operand:SFDF 1 "gpc_reg_operand" "<rreg2>")
> - (match_operand:SFDF 2 "gpc_reg_operand" "<rreg2>")]
> - UNSPEC_COPYSIGN))]
> -- "TARGET_CMPB && !VECTOR_UNIT_VSX_P (<MODE>mode)"
> -+ "TARGET_LFIWAX && !VECTOR_UNIT_VSX_P (<MODE>mode)"
> - "fcpsgn %0,%2,%1"
> - [(set_attr "type" "fp")])
> -
> --
> 1.7.9.7
>
>
>
> _______________________________________________
> Openembedded-core mailing list
> Openembedded-core at lists.openembedded.org
> http://lists.linuxtogo.org/cgi-bin/mailman/listinfo/openembedded-core
More information about the Openembedded-core
mailing list