Index: llvm/trunk/lib/Target/ARM/ARMInstrFormats.td
===================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrFormats.td
+++ llvm/trunk/lib/Target/ARM/ARMInstrFormats.td
@@ -1005,6 +1005,9 @@
 class Thumb2DSPMulPat<dag pattern, dag result> : Pat<pattern, result> {
   list<Predicate> Predicates = [IsThumb2, UseMulOps, HasDSP];
 }
+class Thumb2ExtractPat<dag pattern, dag result> : Pat<pattern, result> {
+  list<Predicate> Predicates = [IsThumb2, HasT2ExtractPack];
+}
 //===----------------------------------------------------------------------===//
 // Thumb Instruction Format Definitions.
 //
Index: llvm/trunk/lib/Target/ARM/ARMInstrInfo.td
===================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrInfo.td
+++ llvm/trunk/lib/Target/ARM/ARMInstrInfo.td
@@ -3416,6 +3416,12 @@
 def SXTAH : AI_exta_rrot<0b01101011,
                "sxtah", BinOpFrag<(add node:$LHS, (sext_inreg node:$RHS,i16))>>;
 
+def : ARMV6Pat<(add rGPR:$Rn, (sext_inreg (srl rGPR:$Rm, rot_imm:$rot), i8)),
+               (SXTAB rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
+def : ARMV6Pat<(add rGPR:$Rn, (sext_inreg (srl rGPR:$Rm, imm8_or_16:$rot),
+                                          i16)),
+               (SXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
+
 def SXTB16  : AI_ext_rrot_np<0b01101000, "sxtb16">;
 
 def SXTAB16 : AI_exta_rrot_np<0b01101000, "sxtab16">;
@@ -3443,6 +3449,11 @@
                         BinOpFrag<(add node:$LHS, (and node:$RHS, 0x00FF))>>;
 def UXTAH : AI_exta_rrot<0b01101111, "uxtah",
                         BinOpFrag<(add node:$LHS, (and node:$RHS, 0xFFFF))>>;
+
+def : ARMV6Pat<(add rGPR:$Rn, (and (srl rGPR:$Rm, rot_imm:$rot), 0xFF)),
+               (UXTAB rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
+def : ARMV6Pat<(add rGPR:$Rn, (and (srl rGPR:$Rm, imm8_or_16:$rot), 0xFFFF)),
+               (UXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
 }
 
 // This isn't safe in general, the add is two 16-bit units, not a 32-bit add.
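As a rough sketch of the DAG shape the new ARMV6Pat entries above are meant to catch (the same shapes are exercised by the sxt_rot.ll and uxt_rot.ll tests later in this patch): a right shift whose result is masked or sign-extended and then added should now select the accumulating extend with a rotate. The function name below is made up for illustration; per test12 in uxt_rot.ll, an armv6 target is expected to select "uxtab r0, r1, r0, ror #8" for it:

  define i32 @uxtab_ror8_sketch(i32 %a, i32 %acc) {
    %hi = lshr i32 %a, 8         ; the shift amount becomes the rotate (ror #8)
    %byte = and i32 %hi, 255     ; zero-extension of the shifted byte
    %sum = add i32 %byte, %acc   ; folded into the accumulate operand of UXTAB
    ret i32 %sum
  }
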
Index: llvm/trunk/lib/Target/ARM/ARMInstrThumb2.td
===================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrThumb2.td
+++ llvm/trunk/lib/Target/ARM/ARMInstrThumb2.td
@@ -1984,12 +1984,19 @@
 // A simple right-shift can also be used in most cases (the exception is the
 // SXTH operations with a rotate of 24: there the non-contiguous bits are
 // relevant).
-def : Pat<(add rGPR:$Rn, (sext_inreg (srl rGPR:$Rm, rot_imm:$rot), i8)),
-          (t2SXTAB rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>,
-      Requires<[HasT2ExtractPack, IsThumb2]>;
-def : Pat<(add rGPR:$Rn, (sext_inreg (srl rGPR:$Rm, imm8_or_16:$rot), i16)),
-          (t2SXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>,
-      Requires<[HasT2ExtractPack, IsThumb2]>;
+def : Thumb2ExtractPat<(add rGPR:$Rn, (sext_inreg
+                                        (srl rGPR:$Rm, rot_imm:$rot), i8)),
+                       (t2SXTAB rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
+def : Thumb2ExtractPat<(add rGPR:$Rn, (sext_inreg
+                                        (srl rGPR:$Rm, imm8_or_16:$rot), i16)),
+                       (t2SXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
+def : Thumb2ExtractPat<(add rGPR:$Rn, (sext_inreg
+                                        (rotr rGPR:$Rm, (i32 24)), i16)),
+                       (t2SXTAH rGPR:$Rn, rGPR:$Rm, (i32 3))>;
+def : Thumb2ExtractPat<(add rGPR:$Rn, (sext_inreg
+                                        (or (srl rGPR:$Rm, (i32 24)),
+                                            (shl rGPR:$Rm, (i32 8))), i16)),
+                       (t2SXTAH rGPR:$Rn, rGPR:$Rm, (i32 3))>;
 
 // Zero extenders
 
@@ -2018,12 +2025,12 @@
                            BinOpFrag<(add node:$LHS, (and node:$RHS, 0xFFFF))>>;
 def t2UXTAB16 : T2I_exta_rrot_np<0b011, "uxtab16">;
 
-def : Pat<(add rGPR:$Rn, (and (srl rGPR:$Rm, rot_imm:$rot), 0xFF)),
-          (t2UXTAB rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>,
-      Requires<[HasT2ExtractPack, IsThumb2]>;
-def : Pat<(add rGPR:$Rn, (and (srl rGPR:$Rm, imm8_or_16:$rot), 0xFFFF)),
-          (t2UXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>,
-      Requires<[HasT2ExtractPack, IsThumb2]>;
+def : Thumb2ExtractPat<(add rGPR:$Rn, (and (srl rGPR:$Rm, rot_imm:$rot),
+                                           0xFF)),
+                       (t2UXTAB rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
+def : Thumb2ExtractPat<(add rGPR:$Rn, (and (srl rGPR:$Rm, imm8_or_16:$rot),
+                                           0xFFFF)),
+                       (t2UXTAH rGPR:$Rn, rGPR:$Rm, rot_imm:$rot)>;
 }
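The last two t2SXTAH patterns added above cover the rotate-by-24 case called out in the comment: after a ror #24 the halfword being sign-extended is assembled from the top byte and the bottom byte of the source register, so the relevant bits are not contiguous and a plain srl cannot express the operation. The patterns therefore match either an explicit rotr by 24 or its (srl 24)/(shl 8) expansion and pass the encoded rotate value 3 (rotations are encoded in steps of 8, so 3 prints as ror #24). A sketch of the corresponding IR, with an illustrative function name, mirroring test5 in sxt_rot.ll and test5 in thumb2-sxt_rot.ll, both of which expect "sxtah r0, r1, r0, ror #24":

  define i32 @sxtah_ror24_sketch(i32 %a, i32 %acc) {
    %lo = lshr i32 %a, 24          ; source bits [31:24] end up as halfword bits [7:0]
    %hi = shl i32 %a, 8            ; source bits [7:0] end up as halfword bits [15:8]
    %rot = or i32 %lo, %hi         ; together a rotate right by 24
    %half = trunc i32 %rot to i16
    %ext = sext i16 %half to i32   ; sign extension of the non-contiguous halfword
    %sum = add i32 %ext, %acc
    ret i32 %sum
  }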
 
 
Index: llvm/trunk/test/CodeGen/ARM/sxt_rot.ll
===================================================================
--- llvm/trunk/test/CodeGen/ARM/sxt_rot.ll
+++ llvm/trunk/test/CodeGen/ARM/sxt_rot.ll
@@ -1,16 +1,20 @@
-; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V6
+; RUN: llc -mtriple=arm-eabi -mattr=+v7 %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V7
 
 define i32 @test0(i8 %A) {
-; CHECK: test0
-; CHECK: sxtb r0, r0 
+; CHECK-LABEL: test0
+; CHECK-V6: sxtb r0, r0
+; CHECK-V7: sxtb r0, r0
   %B = sext i8 %A to i32
   ret i32 %B
 }
 
 define signext i8 @test1(i32 %A) {
-; CHECK: test1
-; CHECK: lsr r0, r0, #8
-; CHECK: sxtb r0, r0
+; CHECK-LABEL: test1
+; CHECK-V6: lsr r0, r0, #8
+; CHECK-V6: sxtb r0, r0
+; CHECK-V6-NOT: sbfx
+; CHECK-V7: sbfx r0, r0, #8, #8
   %B = lshr i32 %A, 8
   %C = shl i32 %A, 24
   %D = or i32 %B, %C
@@ -19,8 +23,9 @@
 }
 
 define signext i32 @test2(i32 %A, i32 %X) {
-; CHECK: test2
-; CHECK: sxtab r0, r1, r0
+; CHECK-LABEL: test2
+; CHECK-V6: sxtab r0, r1, r0, ror #8
+; CHECK-V7: sxtab r0, r1, r0, ror #8
   %B = lshr i32 %A, 8
   %C = shl i32 %A, 24
   %D = or i32 %B, %C
@@ -29,3 +34,80 @@
   %G = add i32 %F, %X
   ret i32 %G
 }
+
+define signext i32 @test3(i32 %A, i32 %X) {
+; CHECK-LABEL: test3
+; CHECK-V6: sxtab r0, r1, r0, ror #16
+; CHECK-V7: sxtab r0, r1, r0, ror #16
+  %B = lshr i32 %A, 16
+  %C = shl i32 %A, 16
+  %D = or i32 %B, %C
+  %E = trunc i32 %D to i8
+  %F = sext i8 %E to i32
+  %G = add i32 %F, %X
+  ret i32 %G
+}
+
+define signext i32 @test4(i32 %A, i32 %X) {
+; CHECK-LABEL: test4
+; CHECK-V6: sxtah r0, r1, r0, ror #8
+; CHECK-V7: sxtah r0, r1, r0, ror #8
+  %B = lshr i32 %A, 8
+  %C = shl i32 %A, 24
+  %D = or i32 %B, %C
+  %E = trunc i32 %D to i16
+  %F = sext i16 %E to i32
+  %G = add i32 %F, %X
+  ret i32 %G
+}
+
+define signext i32 @test5(i32 %A, i32 %X) {
+; CHECK-LABEL: test5
+; CHECK-V6: sxtah r0, r1, r0, ror #24
+; CHECK-V7: sxtah r0, r1, r0, ror #24
+  %B = lshr i32 %A, 24
+  %C = shl i32 %A, 8
+  %D = or i32 %B, %C
+  %E = trunc i32 %D to i16
+  %F = sext i16 %E to i32
+  %G = add i32 %F, %X
+  ret i32 %G
+}
+
+define i32 @test6(i8 %A, i32 %X) {
+; CHECK-LABEL: test6
+; CHECK-V6: sxtab r0, r1, r0
+; CHECK-V7: sxtab r0, r1, r0
+  %sext = sext i8 %A to i32
+  %add = add i32 %X, %sext
+  ret i32 %add
+}
+
+define i32 @test7(i32 %A, i32 %X) {
+; CHECK-LABEL: test7
+; CHECK-V6: sxtab r0, r1, r0
+; CHECK-V7: sxtab r0, r1, r0
+  %shl = shl i32 %A, 24
+  %shr = ashr i32 %shl, 24
+  %add = add i32 %X, %shr
+  ret i32 %add
+}
+
+define i32 @test8(i16 %A, i32 %X) {
+; CHECK-LABEL: test8
+; CHECK-V6: sxtah r0, r1, r0
+; CHECK-V7: sxtah r0, r1, r0
+  %sext = sext i16 %A to i32
+  %add = add i32 %X, %sext
+  ret i32 %add
+}
+
+define i32 @test9(i32 %A, i32 %X) {
+; CHECK-LABEL: test9
+; CHECK-V6: sxtah r0, r1, r0
+; CHECK-V7: sxtah r0, r1, r0
+  %shl = shl i32 %A, 16
+  %shr = ashr i32 %shl, 16
+  %add = add i32 %X, %shr
+  ret i32 %add
+}
Index: llvm/trunk/test/CodeGen/ARM/uxt_rot.ll
===================================================================
--- llvm/trunk/test/CodeGen/ARM/uxt_rot.ll
+++ llvm/trunk/test/CodeGen/ARM/uxt_rot.ll
@@ -1,11 +1,18 @@
-; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o - | FileCheck %s
+; RUN: llc -mtriple=arm-eabi -mattr=+v6 %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V6
+; RUN: llc -mtriple=arm-eabi -mattr=+v7 %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V7
 
 define zeroext i8 @test1(i32 %A.u) {
+  ; CHECK-LABEL: test1
+  ; CHECK-V6: uxtb
+  ; CHECK-V7: uxtb
     %B.u = trunc i32 %A.u to i8
     ret i8 %B.u
 }
 
 define zeroext i32 @test2(i32 %A.u, i32 %B.u) {
+  ; CHECK-LABEL: test2
+  ; CHECK-V6: uxtab r0, r0, r1
+  ; CHECK-V7: uxtab r0, r0, r1
     %C.u = trunc i32 %B.u to i8
     %D.u = zext i8 %C.u to i32
     %E.u = add i32 %A.u, %D.u
@@ -13,6 +20,9 @@
 }
 
 define zeroext i32 @test3(i32 %A.u) {
+  ; CHECK-LABEL: test3
+  ; CHECK-V6-NOT: ubfx
+  ; CHECK-V7: ubfx r0, r0, #8, #16
     %B.u = lshr i32 %A.u, 8
     %C.u = shl i32 %A.u, 24
     %D.u = or i32 %B.u, %C.u
@@ -21,12 +31,144 @@
     ret i32 %F.u
 }
 
-; CHECK: uxtb
-; CHECK-NOT: uxtb
+define zeroext i32 @test4(i32 %A.u) {
+  ; CHECK-LABEL: test4
+  ; CHECK-V6-NOT: ubfx
+  ; CHECK-V7: ubfx r0, r0, #8, #8
+    %B.u = lshr i32 %A.u, 8
+    %C.u = shl i32 %A.u, 24
+    %D.u = or i32 %B.u, %C.u
+    %E.u = trunc i32 %D.u to i8
+    %F.u = zext i8 %E.u to i32
+    ret i32 %F.u
+}
+
+define zeroext i16 @test5(i32 %A.u) {
+  ; CHECK-LABEL: test5
+  ; CHECK-V6: uxth
+  ; CHECK-V7: uxth
+    %B.u = trunc i32 %A.u to i16
+    ret i16 %B.u
+}
+
+define zeroext i32 @test6(i32 %A.u, i32 %B.u) {
+  ; CHECK-LABEL: test6
+  ; CHECK-V6: uxtah r0, r0, r1
+  ; CHECK-V7: uxtah r0, r0, r1
+    %C.u = trunc i32 %B.u to i16
+    %D.u = zext i16 %C.u to i32
+    %E.u = add i32 %A.u, %D.u
+    ret i32 %E.u
+}
+
+define zeroext i32 @test7(i32 %A, i32 %X) {
+; CHECK-LABEL: test7
+; CHECK-V6: uxtab r0, r1, r0, ror #8
+; CHECK-V7: uxtab r0, r1, r0, ror #8
+  %B = lshr i32 %A, 8
+  %C = shl i32 %A, 24
+  %D = or i32 %B, %C
+  %E = trunc i32 %D to i8
+  %F = zext i8 %E to i32
+  %G = add i32 %F, %X
+  ret i32 %G
+}
+
+define zeroext i32 @test8(i32 %A, i32 %X) {
+; CHECK-LABEL: test8
+; CHECK-V6: uxtab r0, r1, r0, ror #16
+; CHECK-V7: uxtab r0, r1, r0, ror #16
+  %B = lshr i32 %A, 16
+  %C = shl i32 %A, 16
+  %D = or i32 %B, %C
+  %E = trunc i32 %D to i8
+  %F = zext i8 %E to i32
+  %G = add i32 %F, %X
+  ret i32 %G
+}
+
+define zeroext i32 @test9(i32 %A, i32 %X) {
+; CHECK-LABEL: test9
+; CHECK-V6: uxtah r0, r1, r0, ror #8
+; CHECK-V7: uxtah r0, r1, r0, ror #8
+  %B = lshr i32 %A, 8
+  %C = shl i32 %A, 24
+  %D = or i32 %B, %C
+  %E = trunc i32 %D to i16
+  %F = zext i16 %E to i32
+  %G = add i32 %F, %X
+  ret i32 %G
+}
+
+define zeroext i32 @test10(i32 %A, i32 %X) {
+; CHECK-LABEL: test10
+; CHECK-V6: uxtah r0, r1, r0, ror #24
+; CHECK-V7: uxtah r0, r1, r0, ror #24
+  %B = lshr i32 %A, 24
+  %C = shl i32 %A, 8
+  %D = or i32 %B, %C
+  %E = trunc i32 %D to i16
+  %F = zext i16 %E to i32
+  %G = add i32 %F, %X
+  ret i32 %G
+}
+
+define zeroext i32 @test11(i32 %A, i32 %X) {
+; CHECK-LABEL: test11
+; CHECK-V6: uxtab r0, r1, r0
+; CHECK-V7: uxtab r0, r1, r0
+  %B = and i32 %A, 255
+  %add = add i32 %X, %B
+  ret i32 %add
+}
 
-; CHECK: uxtab
-; CHECK-NOT: uxtab
+define zeroext i32 @test12(i32 %A, i32 %X) {
+; CHECK-LABEL: test12
+; CHECK-V6: uxtab r0, r1, r0, ror #8
+; CHECK-V7: uxtab r0, r1, r0, ror #8
+  %B = lshr i32 %A, 8
+  %and = and i32 %B, 255
+  %add = add i32 %and, %X
+  ret i32 %add
+}
+
+define zeroext i32 @test13(i32 %A, i32 %X) {
+; CHECK-LABEL: test13
+; CHECK-V6: uxtab r0, r1, r0, ror #16
+; CHECK-V7: uxtab r0, r1, r0, ror #16
+  %B = lshr i32 %A, 16
+  %and = and i32 %B, 255
+  %add = add i32 %and, %X
+  ret i32 %add
+}
 
-; CHECK: uxth
-; CHECK-NOT: uxth
+define zeroext i32 @test14(i32 %A, i32 %X) {
+; CHECK-LABEL: test14
+; CHECK-V6: uxtah r0, r1, r0
+; CHECK-V7: uxtah r0, r1, r0
+  %B = and i32 %A, 65535
+  %add = add i32 %X, %B
+  ret i32 %add
+}
 
+define zeroext i32 @test15(i32 %A, i32 %X) {
+; CHECK-LABEL: test15
+; CHECK-V6: uxtah r0, r1, r0, ror #8
+; CHECK-V7: uxtah r0, r1, r0, ror #8
+  %B = lshr i32 %A, 8
+  %and = and i32 %B, 65535
+  %add = add i32 %and, %X
+  ret i32 %add
+}
+
+define zeroext i32 @test16(i32 %A, i32 %X) {
+; CHECK-LABEL: test16
+; CHECK-V6: uxtah r0, r1, r0, ror #24
+; CHECK-V7: uxtah r0, r1, r0, ror #24
+  %B = lshr i32 %A, 24
+  %C = shl i32 %A, 8
+  %D = or i32 %B, %C
+  %E = and i32 %D, 65535
+  %F = add i32 %E, %X
+  ret i32 %F
+}
Index: llvm/trunk/test/CodeGen/Thumb2/thumb2-sxt-uxt.ll
===================================================================
--- llvm/trunk/test/CodeGen/Thumb2/thumb2-sxt-uxt.ll
+++ llvm/trunk/test/CodeGen/Thumb2/thumb2-sxt-uxt.ll
@@ -1,4 +1,5 @@
 ; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m3 %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m4 %s -o - | FileCheck %s --check-prefix=CHECK-M4
 
 define i32 @test1(i16 zeroext %z) nounwind {
 ; CHECK-LABEL: test1:
@@ -27,3 +28,78 @@
   %r = zext i8 %z to i32
   ret i32 %r
 }
+
+define i32 @test5(i32 %a, i8 %b) {
+; CHECK-LABEL: test5:
+; CHECK-NOT: sxtab
+; CHECK-M4: sxtab r0, r0, r1
+  %sext = sext i8 %b to i32
+  %add = add i32 %a, %sext
+  ret i32 %add
+}
+
+define i32 @test6(i32 %a, i32 %b) {
+; CHECK-LABEL: test6:
+; CHECK-NOT: sxtab
+; CHECK-M4: sxtab r0, r0, r1
+  %shl = shl i32 %b, 24
+  %ashr = ashr i32 %shl, 24
+  %add = add i32 %a, %ashr
+  ret i32 %add
+}
+
+define i32 @test7(i32 %a, i16 %b) {
+; CHECK-LABEL: test7:
+; CHECK-NOT: sxtah
+; CHECK-M4: sxtah r0, r0, r1
+  %sext = sext i16 %b to i32
+  %add = add i32 %a, %sext
+  ret i32 %add
+}
+
+define i32 @test8(i32 %a, i32 %b) {
+; CHECK-LABEL: test8:
+; CHECK-NOT: sxtah
+; CHECK-M4: sxtah r0, r0, r1
+  %shl = shl i32 %b, 16
+  %ashr = ashr i32 %shl, 16
+  %add = add i32 %a, %ashr
+  ret i32 %add
+}
+
+define i32 @test9(i32 %a, i8 %b) {
+; CHECK-LABEL: test9:
+; CHECK-NOT: uxtab
+; CHECK-M4: uxtab r0, r0, r1
+  %zext = zext i8 %b to i32
+  %add = add i32 %a, %zext
+  ret i32 %add
+}
+
+define i32 @test10(i32 %a, i32 %b) {
+; CHECK-LABEL: test10:
+; CHECK-NOT: uxtab
+; CHECK-M4: uxtab r0, r0, r1
+  %and = and i32 %b, 255
+  %add = add i32 %a, %and
+  ret i32 %add
+}
+
+define i32 @test11(i32 %a, i16 %b) {
+; CHECK-LABEL: test11:
+; CHECK-NOT: uxtah
+; CHECK-M4: uxtah r0, r0, r1
+  %zext = zext i16 %b to i32
+  %add = add i32 %a, %zext
+  ret i32 %add
+}
+
+define i32 @test12(i32 %a, i32 %b) {
+; CHECK-LABEL: test12:
+; CHECK-NOT: uxtah
+; CHECK-M4: uxtah r0, r0, r1
+  %and = and i32 %b, 65535
+  %add = add i32 %a, %and
+  ret i32 %add
+}
+
Index: llvm/trunk/test/CodeGen/Thumb2/thumb2-sxt_rot.ll
===================================================================
--- llvm/trunk/test/CodeGen/Thumb2/thumb2-sxt_rot.ll
+++ llvm/trunk/test/CodeGen/Thumb2/thumb2-sxt_rot.ll
@@ -1,9 +1,10 @@
-; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+t2xtpk %s -o - \
-; RUN:  | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=arm1156t2-s -mattr=+thumb2,+t2xtpk %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumb-eabi -mcpu=cortex-m3 %s -o - | FileCheck %s --check-prefix=CHECK-M3
 
 define i32 @test0(i8 %A) {
 ; CHECK-LABEL: test0:
 ; CHECK: sxtb r0, r0
+; CHECK-M3: sxtb r0, r0
         %B = sext i8 %A to i32
 	ret i32 %B
 }
@@ -11,6 +12,7 @@
 define signext i8 @test1(i32 %A)  {
 ; CHECK-LABEL: test1:
 ; CHECK: sbfx r0, r0, #8, #8
+; CHECK-M3: sbfx r0, r0, #8, #8
 	%B = lshr i32 %A, 8
 	%C = shl i32 %A, 24
 	%D = or i32 %B, %C
@@ -21,6 +23,7 @@
 define signext i32 @test2(i32 %A, i32 %X)  {
 ; CHECK-LABEL: test2:
 ; CHECK: sxtab  r0, r1, r0, ror #8
+; CHECK-M3-NOT: sxtab
 	%B = lshr i32 %A, 8
 	%C = shl i32 %A, 24
 	%D = or i32 %B, %C
@@ -33,10 +36,36 @@
 define i32 @test3(i32 %A, i32 %X) {
 ; CHECK-LABEL: test3:
 ; CHECK: sxtah r0, r0, r1, ror #8
+; CHECK-M3-NOT: sxtah
   %X.hi = lshr i32 %X, 8
   %X.trunc = trunc i32 %X.hi to i16
   %addend = sext i16 %X.trunc to i32
-
   %sum = add i32 %A, %addend
   ret i32 %sum
 }
+
+define signext i32 @test4(i32 %A, i32 %X)  {
+; CHECK-LABEL: test4:
+; CHECK: sxtab  r0, r1, r0, ror #16
+; CHECK-M3-NOT: sxtab
+  %B = lshr i32 %A, 16
+  %C = shl i32 %A, 16
+  %D = or i32 %B, %C
+  %E = trunc i32 %D to i8
+  %F = sext i8 %E to i32
+  %G = add i32 %F, %X
+  ret i32 %G
+}
+
+define signext i32 @test5(i32 %A, i32 %X)  {
+; CHECK-LABEL: test5:
+; CHECK: sxtah  r0, r1, r0, ror #24
+; CHECK-M3-NOT: sxtah
+  %B = lshr i32 %A, 24
+  %C = shl i32 %A, 8
+  %D = or i32 %B, %C
+  %E = trunc i32 %D to i16
+  %F = sext i16 %E to i32
+  %G = add i32 %F, %X
+  ret i32 %G
+}
Index: llvm/trunk/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
===================================================================
--- llvm/trunk/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
+++ llvm/trunk/test/CodeGen/Thumb2/thumb2-uxt_rot.ll
@@ -3,17 +3,16 @@
 ; rdar://11318438
 
 define zeroext i8 @test1(i32 %A.u)  {
-; A8-LABEL: test1:
+; CHECK-LABEL: test1:
 ; A8: uxtb r0, r0
     %B.u = trunc i32 %A.u to i8
     ret i8 %B.u
 }
 
 define zeroext i32 @test2(i32 %A.u, i32 %B.u)  {
-; A8-LABEL: test2:
+; CHECK-LABEL: test2:
 ; A8: uxtab  r0, r0, r1
 
-; M3-LABEL: test2:
 ; M3: uxtb  r1, r1
 ; M3-NOT: uxtab
 ; M3: add   r0, r1
@@ -24,8 +23,9 @@
 }
 
 define zeroext i32 @test3(i32 %A.u)  {
-; A8-LABEL: test3:
+; CHECK-LABEL: test3:
 ; A8: ubfx  r0, r0, #8, #16
+; M3: ubfx r0, r0, #8, #16
     %B.u = lshr i32 %A.u, 8
     %C.u = shl i32 %A.u, 24
     %D.u = or i32 %B.u, %C.u
@@ -35,23 +35,59 @@
 }
 
 define i32 @test4(i32 %A, i32 %X) {
-; A8-LABEL: test4:
+; CHECK-LABEL: test4:
 ; A8: uxtab r0, r0, r1, ror #16
+; M3-NOT: uxtab
   %X.hi = lshr i32 %X, 16
   %X.trunc = trunc i32 %X.hi to i8
   %addend = zext i8 %X.trunc to i32
-
   %sum = add i32 %A, %addend
   ret i32 %sum
 }
 
 define i32 @test5(i32 %A, i32 %X) {
-; A8-LABEL: test5:
+; CHECK-LABEL: test5:
 ; A8: uxtah r0, r0, r1, ror #8
+; M3-NOT: uxtah
   %X.hi = lshr i32 %X, 8
   %X.trunc = trunc i32 %X.hi to i16
   %addend = zext i16 %X.trunc to i32
+  %sum = add i32 %A, %addend
+  ret i32 %sum
+}
 
+define i32 @test6(i32 %A, i32 %X) {
+; CHECK-LABEL: test6:
+; A8: uxtab r0, r0, r1, ror #8
+; M3-NOT: uxtab
+  %X.hi = lshr i32 %X, 8
+  %X.trunc = trunc i32 %X.hi to i8
+  %addend = zext i8 %X.trunc to i32
   %sum = add i32 %A, %addend
   ret i32 %sum
 }
+
+define i32 @test7(i32 %A, i32 %X) {
+; CHECK-LABEL: test7:
+; A8: uxtah r0, r0, r1, ror #24
+; M3-NOT: uxtah
+  %lshr = lshr i32 %X, 24
+  %shl = shl i32 %X, 8
+  %or = or i32 %lshr, %shl
+  %trunc = trunc i32 %or to i16
+  %zext = zext i16 %trunc to i32
+  %add = add i32 %A, %zext
+  ret i32 %add
+}
+
+define i32 @test8(i32 %A, i32 %X) {
+; CHECK-LABEL: test8:
+; A8: uxtah r0, r0, r1, ror #24
+; M3-NOT: uxtah
+  %lshr = lshr i32 %X, 24
+  %shl = shl i32 %X, 8
+  %or = or i32 %lshr, %shl
+  %and = and i32 %or, 65535
+  %add = add i32 %A, %and
+  ret i32 %add
+}