Index: lib/Target/RISCV/RISCVISelLowering.h
===================================================================
--- lib/Target/RISCV/RISCVISelLowering.h
+++ lib/Target/RISCV/RISCVISelLowering.h
@@ -65,6 +65,7 @@
     return true;
   }
   SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
 };
 }
Index: lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- lib/Target/RISCV/RISCVISelLowering.cpp
+++ lib/Target/RISCV/RISCVISelLowering.cpp
@@ -49,17 +49,57 @@
 
   // TODO: add all necessary setOperationAction calls
 
+  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
   setOperationAction(ISD::BR_CC, MVT::i32, Expand);
   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
-  setBooleanContents(ZeroOrOneBooleanContent);
+  setOperationAction(ISD::SELECT, MVT::i32, Expand);
+
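+  // RV32I has no dedicated sub-word sign-extension instructions;
+  // SIGN_EXTEND_INREG is expanded to a shift-left/shift-right-arithmetic
+  // pair (see sext-zext-trunc.ll).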
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
+
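+  // RISC-V has no condition codes or carry flag, so ADDC/ADDE and SUBC/SUBE
+  // are expanded; the carry or borrow is recomputed with SLTU (see
+  // addc-adde-sube-subc.ll).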
+  setOperationAction(ISD::ADDC, MVT::i32, Expand);
+  setOperationAction(ISD::ADDE, MVT::i32, Expand);
+  setOperationAction(ISD::SUBC, MVT::i32, Expand);
+  setOperationAction(ISD::SUBE, MVT::i32, Expand);
+
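+  // Integer division and remainder are expanded to libcalls such as
+  // __divsi3 and __umodsi3 (see div.ll and rem.ll).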
+  setOperationAction(ISD::SREM, MVT::i32, Expand);
+  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
+  setOperationAction(ISD::SDIV, MVT::i32, Expand);
+  setOperationAction(ISD::UREM, MVT::i32, Expand);
+  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+  setOperationAction(ISD::UDIV, MVT::i32, Expand);
+
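+  // Likewise, multiplication is expanded to a __mulsi3/__muldi3 libcall
+  // (see mul.ll).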
+  setOperationAction(ISD::MUL, MVT::i32, Expand);
+  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
+  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
+  setOperationAction(ISD::MULHS, MVT::i32, Expand);
+  setOperationAction(ISD::MULHU, MVT::i32, Expand);
+
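+  // With the *_PARTS nodes expanded, 64-bit shifts end up as libcalls such
+  // as __ashldi3 and __lshrdi3 (see shifts.ll).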
+  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
+  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
+  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
+
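+  // Rotates, byte swaps and bit counts have no RV32I instructions and are
+  // expanded to generic instruction sequences (see rotl-rotr.ll and
+  // bswap-ctlz-cttz-ctpop.ll).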
+  setOperationAction(ISD::ROTL, MVT::i32, Expand);
+  setOperationAction(ISD::ROTR, MVT::i32, Expand);
+  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
+  setOperationAction(ISD::CTLZ, MVT::i32, Expand);
+  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
 
   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
 
+  setBooleanContents(ZeroOrOneBooleanContent);
+
   // Function alignments (log2)
   setMinFunctionAlignment(3);
   setPrefFunctionAlignment(3);
 
-  // inline memcpy() for kernel to see explicit copy
+  // Effectively disable jump table generation
+  setMinimumJumpTableEntries(INT_MAX);
+
+  // Inline memcpy(), memmove() and memset() by allowing up to 128 stores per
+  // operation
   MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 128;
   MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 128;
   MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = 128;
@@ -99,6 +139,27 @@
   }
 }
 
+SDValue RISCVTargetLowering::lowerExternalSymbol(SDValue Op,
+                                                 SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  EVT Ty = Op.getValueType();
+  ExternalSymbolSDNode *N = cast<ExternalSymbolSDNode>(Op);
+  const char *Sym = N->getSymbol();
+
+  // TODO: should also handle gp-relative loads
+
+  if (!isPositionIndependent() && !Subtarget->is64Bit()) {
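+    // Materialize the absolute address with a %hi/%lo pair:
+    //   lui  rd, %hi(sym)
+    //   addi rd, rd, %lo(sym)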
+    SDValue GAHi = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_HI);
+    SDValue GALo = DAG.getTargetExternalSymbol(Sym, Ty, RISCVII::MO_LO);
+    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, GAHi), 0);
+    SDValue MNLo =
+        SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, GALo), 0);
+    return MNLo;
+  } else {
+    report_fatal_error("Unable to lowerExternalSymbol");
+  }
+}
+
 SDValue RISCVTargetLowering::lowerSELECT_CC(SDValue Op,
                                             SelectionDAG &DAG) const {
   SDValue LHS = Op.getOperand(0);
@@ -347,8 +408,7 @@
   if (isa<GlobalAddressSDNode>(Callee)) {
     Callee = lowerGlobalAddress(Callee, DAG);
   } else if (isa<ExternalSymbolSDNode>(Callee)) {
-    report_fatal_error(
-        "lowerExternalSymbol, needed for lowerCall, not yet handled");
+    Callee = lowerExternalSymbol(Callee, DAG);
   }
 
   // The first call operand is the chain and the second is the target address.
Index: lib/Target/RISCV/RISCVInstrInfo.td
===================================================================
--- lib/Target/RISCV/RISCVInstrInfo.td
+++ lib/Target/RISCV/RISCVInstrInfo.td
@@ -304,6 +304,17 @@
 def : PatGprGpr<setult, SLTU>;
 def : PatGprSimm12<setult, SLTIU>;
 
+// Define pattern expansions for setcc operations that aren't directly
+// handled by a RISC-V instruction
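+// For example, (seteq a, b) is selected as (SLTIU (XOR a, b), 1): the XOR is
+// zero if and only if the operands are equal.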
+def : Pat<(seteq GPR:$rs1, GPR:$rs2), (SLTIU (XOR GPR:$rs1, GPR:$rs2), 1)>;
+def : Pat<(setne GPR:$rs1, GPR:$rs2), (SLTU X0_32, (XOR GPR:$rs1, GPR:$rs2))>;
+def : Pat<(setugt GPR:$rs1, GPR:$rs2), (SLTU GPR:$rs2, GPR:$rs1)>;
+def : Pat<(setuge GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs1, GPR:$rs2), 1)>;
+def : Pat<(setule GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs2, GPR:$rs1), 1)>;
+def : Pat<(setgt GPR:$rs1, GPR:$rs2), (SLT GPR:$rs2, GPR:$rs1)>;
+def : Pat<(setge GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs1, GPR:$rs2), 1)>;
+def : Pat<(setle GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs2, GPR:$rs1), 1)>;
+
 let usesCustomInserter = 1 in
 def Select : Pseudo<(outs GPR:$dst),
                     (ins GPR:$lhs, GPR:$rhs, i32imm:$imm, GPR:$src, GPR:$src2),
@@ -344,11 +355,14 @@
 def PseudoBR : Pseudo<(outs), (ins simm21_lsb0:$imm20), [(br bb:$imm20)]>,
                PseudoInstExpansion<(JAL X0_32, simm21_lsb0:$imm20)>;
 
-let isCall = 1, Defs=[X1_32] in
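+// An indirect branch is encoded as a JALR with X0 as the link register, i.e.
+// the return address is discarded.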
+let isBranch = 1, isBarrier = 1, isTerminator = 1, isIndirectBranch = 1 in
+def PseudoBRIND : Pseudo<(outs), (ins GPR:$rs1), [(brind GPR:$rs1)]>,
+                  PseudoInstExpansion<(JALR X0_32, GPR:$rs1, 0)>;
+
+let isCall = 1, Defs = [X1_32] in
 def PseudoCALL : Pseudo<(outs), (ins GPR:$rs1), [(Call GPR:$rs1)]>,
                  PseudoInstExpansion<(JALR X1_32, GPR:$rs1, 0)>;
 
-
 let isBarrier = 1, isReturn = 1, isTerminator = 1 in
 def PseudoRET : Pseudo<(outs), (ins), [(RetFlag)]>,
                 PseudoInstExpansion<(JALR X0_32, X1_32, 0)>;
Index: lib/Target/RISCV/RISCVMCInstLower.cpp
===================================================================
--- lib/Target/RISCV/RISCVMCInstLower.cpp
+++ lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -82,6 +82,10 @@
     case MachineOperand::MO_GlobalAddress:
       MCOp = lowerSymbolOperand(MO, AP.getSymbol(MO.getGlobal()), AP);
       break;
+    case MachineOperand::MO_ExternalSymbol:
+      MCOp = lowerSymbolOperand(
+          MO, AP.GetExternalSymbolSymbol(MO.getSymbolName()), AP);
+      break;
     }
     return true;
 }
Index: test/CodeGen/RISCV/addc-adde-sube-subc.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/addc-adde-sube-subc.ll
@@ -0,0 +1,26 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
+
+; Ensure that the ISDOpcodes ADDC, ADDE, SUBC, SUBE are handled correctly
+
+define i64 @addc_adde(i64 %a, i64 %b) {
+; CHECK-LABEL: addc_adde:
+; CHECK: add a1, a1, a3
+; CHECK: add a2, a0, a2
+; CHECK: sltu a0, a2, a0
+; CHECK: add a1, a1, a0
+; CHECK: addi a0, a2, 0
+; CHECK: jalr zero, ra, 0
+  %1 = add i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @subc_sube(i64 %a, i64 %b) {
+; CHECK-LABEL: subc_sube:
+; CHECK: sub a1, a1, a3
+; CHECK: sltu a3, a0, a2
+; CHECK: sub a1, a1, a3
+; CHECK: sub a0, a0, a2
+; CHECK: jalr zero, ra, 0
+  %1 = sub i64 %a, %b
+  ret i64 %1
+}
Index: test/CodeGen/RISCV/alu.ll
===================================================================
--- test/CodeGen/RISCV/alu.ll
+++ test/CodeGen/RISCV/alu.ll
@@ -1,5 +1,9 @@
 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
 
+; These tests are each targeted at a particular RISC-V ALU instruction. Other
+; files in this directory exercise LLVM IR instructions that don't map
+; directly to a single RISC-V instruction.
+
 ; Register-immediate instructions
 
 define i32 @addi(i32 %a) nounwind {
Index: test/CodeGen/RISCV/bare-select.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/bare-select.ll
@@ -0,0 +1,13 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
+
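+; SELECT is expanded to SELECT_CC, which is in turn lowered through the
+; custom-inserted Select pseudo, so a bare select becomes a branch sequence.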
+define i32 @bare_select(i1 %a, i32 %b, i32 %c) {
+; CHECK-LABEL: bare_select:
+; CHECK: andi a0, a0, 1
+; CHECK: addi a3, zero, 0
+; CHECK: bne a0, a3, .LBB0_2
+; CHECK: addi a1, a2, 0
+; CHECK: .LBB0_2:
+; CHECK: addi a0, a1, 0
+  %1 = select i1 %a, i32 %b, i32 %c
+  ret i32 %1
+}
Index: test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll
@@ -0,0 +1,89 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
+
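+; These intrinsics have no corresponding RV32I instructions and are expanded;
+; the tests simply check that instruction selection succeeds for each.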
+declare i16 @llvm.bswap.i16(i16)
+declare i32 @llvm.bswap.i32(i32)
+declare i64 @llvm.bswap.i64(i64)
+declare i8 @llvm.cttz.i8(i8, i1)
+declare i16 @llvm.cttz.i16(i16, i1)
+declare i32 @llvm.cttz.i32(i32, i1)
+declare i64 @llvm.cttz.i64(i64, i1)
+declare i32 @llvm.ctlz.i32(i32, i1)
+declare i32 @llvm.ctpop.i32(i32)
+
+define i16 @test_bswap_i16(i16 %a) {
+; CHECK-LABEL: test_bswap_i16:
+  %tmp = call i16 @llvm.bswap.i16(i16 %a)
+  ret i16 %tmp
+}
+
+define i32 @test_bswap_i32(i32 %a) {
+; CHECK-LABEL: test_bswap_i32:
+  %tmp = call i32 @llvm.bswap.i32(i32 %a)
+  ret i32 %tmp
+}
+
+define i64 @test_bswap_i64(i64 %a) {
+; CHECK-LABEL: test_bswap_i64:
+  %tmp = call i64 @llvm.bswap.i64(i64 %a)
+  ret i64 %tmp
+}
+
+define i8 @test_cttz_i8(i8 %a) {
+; CHECK-LABEL: test_cttz_i8:
+  %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 false)
+  ret i8 %tmp
+}
+
+define i16 @test_cttz_i16(i16 %a) {
+; CHECK-LABEL: test_cttz_i16:
+  %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 false)
+  ret i16 %tmp
+}
+
+define i32 @test_cttz_i32(i32 %a) {
+; CHECK-LABEL: test_cttz_i32:
+  %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 false)
+  ret i32 %tmp
+}
+
+define i32 @test_ctlz_i32(i32 %a) {
+; CHECK-LABEL: test_ctlz_i32:
+  %tmp = call i32 @llvm.ctlz.i32(i32 %a, i1 false)
+  ret i32 %tmp
+}
+
+define i64 @test_cttz_i64(i64 %a) {
+; CHECK-LABEL: test_cttz_i64:
+  %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 false)
+  ret i64 %tmp
+}
+
+define i8 @test_cttz_i8_zero_undef(i8 %a) {
+; CHECK-LABEL: test_cttz_i8_zero_undef:
+  %tmp = call i8 @llvm.cttz.i8(i8 %a, i1 true)
+  ret i8 %tmp
+}
+
+define i16 @test_cttz_i16_zero_undef(i16 %a) {
+; CHECK-LABEL: test_cttz_i16_zero_undef:
+  %tmp = call i16 @llvm.cttz.i16(i16 %a, i1 true)
+  ret i16 %tmp
+}
+
+define i32 @test_cttz_i32_zero_undef(i32 %a) {
+; CHECK-LABEL: test_cttz_i32_zero_undef:
+  %tmp = call i32 @llvm.cttz.i32(i32 %a, i1 true)
+  ret i32 %tmp
+}
+
+define i64 @test_cttz_i64_zero_undef(i64 %a) {
+; CHECK-LABEL: test_cttz_i64_zero_undef:
+  %tmp = call i64 @llvm.cttz.i64(i64 %a, i1 true)
+  ret i64 %tmp
+}
+
+define i32 @test_ctpop_i32(i32 %a) {
+; CHECK-LABEL: test_ctpop_i32:
+  %1 = call i32 @llvm.ctpop.i32(i32 %a)
+  ret i32 %1
+}
Index: test/CodeGen/RISCV/div.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/div.ll
@@ -0,0 +1,96 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
+
+define i32 @udiv(i32 %a, i32 %b) {
+; CHECK-LABEL: udiv:
+; CHECK: lui a2, %hi(__udivsi3)
+; CHECK: addi a2, a2, %lo(__udivsi3)
+; CHECK: jalr ra, a2, 0
+  %1 = udiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @udiv_constant(i32 %a) {
+; CHECK-LABEL: udiv_constant:
+; CHECK: lui a1, %hi(__udivsi3)
+; CHECK: addi a2, a1, %lo(__udivsi3)
+; CHECK: addi a1, zero, 5
+; CHECK: jalr ra, a2, 0
+  %1 = udiv i32 %a, 5
+  ret i32 %1
+}
+
+define i32 @udiv_pow2(i32 %a) {
+; CHECK-LABEL: udiv_pow2:
+; CHECK: srli a0, a0, 3
+  %1 = udiv i32 %a, 8
+  ret i32 %1
+}
+
+define i64 @udiv64(i64 %a, i64 %b) {
+; CHECK-LABEL: udiv64:
+; CHECK: lui a4, %hi(__udivdi3)
+; CHECK: addi a4, a4, %lo(__udivdi3)
+; CHECK: jalr ra, a4, 0
+  %1 = udiv i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @udiv64_constant(i64 %a) {
+; CHECK-LABEL: udiv64_constant:
+; CHECK: lui a2, %hi(__udivdi3)
+; CHECK: addi a4, a2, %lo(__udivdi3)
+; CHECK: addi a2, zero, 5
+; CHECK: addi a3, zero, 0
+; CHECK: jalr ra, a4, 0
+  %1 = udiv i64 %a, 5
+  ret i64 %1
+}
+
+define i32 @sdiv(i32 %a, i32 %b) {
+; CHECK-LABEL: sdiv:
+; CHECK: lui a2, %hi(__divsi3)
+; CHECK: addi a2, a2, %lo(__divsi3)
+; CHECK: jalr ra, a2, 0
+  %1 = sdiv i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @sdiv_constant(i32 %a) {
+; CHECK-LABEL: sdiv_constant:
+; CHECK: lui a1, %hi(__divsi3)
+; CHECK: addi a2, a1, %lo(__divsi3)
+; CHECK: addi a1, zero, 5
+; CHECK: jalr ra, a2, 0
+  %1 = sdiv i32 %a, 5
+  ret i32 %1
+}
+
+define i32 @sdiv_pow2(i32 %a) {
+; CHECK-LABEL: sdiv_pow2:
+; CHECK: srai a1, a0, 31
+; CHECK: srli a1, a1, 29
+; CHECK: add a0, a0, a1
+; CHECK: srai a0, a0, 3
+  %1 = sdiv i32 %a, 8
+  ret i32 %1
+}
+
+define i64 @sdiv64(i64 %a, i64 %b) {
+; CHECK-LABEL: sdiv64:
+; CHECK: lui a4, %hi(__divdi3)
+; CHECK: addi a4, a4, %lo(__divdi3)
+; CHECK: jalr ra, a4, 0
+  %1 = sdiv i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @sdiv64_constant(i64 %a) {
+; CHECK-LABEL: sdiv64_constant:
+; CHECK: lui a2, %hi(__divdi3)
+; CHECK: addi a4, a2, %lo(__divdi3)
+; CHECK: addi a2, zero, 5
+; CHECK: addi a3, zero, 0
+; CHECK: jalr ra, a4, 0
+  %1 = sdiv i64 %a, 5
+  ret i64 %1
+}
Index: test/CodeGen/RISCV/indirectbr.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/indirectbr.ll
@@ -0,0 +1,14 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
+
+define i32 @indirectbr(i8* %target) nounwind {
+; CHECK-LABEL: indirectbr:
+; CHECK: jalr zero, a0, 0
+; CHECK: .LBB0_1:
+; CHECK: addi a0, zero, 0
+; CHECK: jalr zero, ra, 0
+  indirectbr i8* %target, [label %test_label]
+test_label:
+  br label %ret
+ret:
+  ret i32 0
+}
Index: test/CodeGen/RISCV/jumptable.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/jumptable.ll
@@ -0,0 +1,26 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
+
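+; Since jump table generation is disabled (setMinimumJumpTableEntries is set
+; to INT_MAX), this switch should lower to a chain of conditional branches
+; rather than a jump table.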
+define void @jt(i32 %in, i32* %out) {
+; CHECK-LABEL: jt:
+entry:
+  switch i32 %in, label %exit [
+    i32 1, label %bb1
+    i32 2, label %bb2
+    i32 3, label %bb3
+    i32 4, label %bb4
+  ]
+bb1:
+  store i32 4, i32* %out
+  br label %exit
+bb2:
+  store i32 3, i32* %out
+  br label %exit
+bb3:
+  store i32 2, i32* %out
+  br label %exit
+bb4:
+  store i32 1, i32* %out
+  br label %exit
+exit:
+  ret void
+}
Index: test/CodeGen/RISCV/mul.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/mul.ll
@@ -0,0 +1,58 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
+
+define i32 @square(i32 %a) {
+; CHECK-LABEL: square:
+; CHECK: lui a1, %hi(__mulsi3)
+; CHECK: addi a2, a1, %lo(__mulsi3)
+; CHECK: addi a1, a0, 0
+; CHECK: jalr ra, a2, 0
+  %1 = mul i32 %a, %a
+  ret i32 %1
+}
+
+define i32 @mul(i32 %a, i32 %b) {
+; CHECK-LABEL: mul:
+; CHECK: lui a2, %hi(__mulsi3)
+; CHECK: addi a2, a2, %lo(__mulsi3)
+; CHECK: jalr ra, a2, 0
+  %1 = mul i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @mul_constant(i32 %a) {
+; CHECK-LABEL: mul_constant:
+; CHECK: lui a1, %hi(__mulsi3)
+; CHECK: addi a2, a1, %lo(__mulsi3)
+; CHECK: addi a1, zero, 5
+; CHECK: jalr ra, a2, 0
+  %1 = mul i32 %a, 5
+  ret i32 %1
+}
+
+define i32 @mul_pow2(i32 %a) {
+; CHECK-LABEL: mul_pow2:
+; CHECK: slli a0, a0, 3
+; CHECK: jalr zero, ra, 0
+  %1 = mul i32 %a, 8
+  ret i32 %1
+}
+
+define i64 @mul64(i64 %a, i64 %b) {
+; CHECK-LABEL: mul64:
+; CHECK: lui a4, %hi(__muldi3)
+; CHECK: addi a4, a4, %lo(__muldi3)
+; CHECK: jalr ra, a4, 0
+  %1 = mul i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @mul64_constant(i64 %a) {
+; CHECK-LABEL: mul64_constant:
+; CHECK: lui a2, %hi(__muldi3)
+; CHECK: addi a4, a2, %lo(__muldi3)
+; CHECK: addi a2, zero, 5
+; CHECK: addi a3, zero, 0
+; CHECK: jalr ra, a4, 0
+  %1 = mul i64 %a, 5
+  ret i64 %1
+}
Index: test/CodeGen/RISCV/rem.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/rem.ll
@@ -0,0 +1,19 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
+
+define i32 @urem(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: urem:
+; CHECK: lui a2, %hi(__umodsi3)
+; CHECK: addi a2, a2, %lo(__umodsi3)
+; CHECK: jalr ra, a2, 0
+  %1 = urem i32 %a, %b
+  ret i32 %1
+}
+
+define i32 @srem(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: srem:
+; CHECK: lui a2, %hi(__modsi3)
+; CHECK: addi a2, a2, %lo(__modsi3)
+; CHECK: jalr ra, a2, 0
+  %1 = srem i32 %a, %b
+  ret i32 %1
+}
Index: test/CodeGen/RISCV/rotl-rotr.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/rotl-rotr.ll
@@ -0,0 +1,22 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
+
+; These IR sequences will generate ISD::ROTL and ISD::ROTR nodes, which the
+; RISC-V backend must be able to select.
+
+define i32 @rotl(i32 %x, i32 %y) {
+; CHECK-LABEL: rotl:
+  %z = sub i32 32, %y
+  %b = shl i32 %x, %y
+  %c = lshr i32 %x, %z
+  %d = or i32 %b, %c
+  ret i32 %d
+}
+
+define i32 @rotr(i32 %x, i32 %y) {
+; CHECK-LABEL: rotr:
+  %z = sub i32 32, %y
+  %b = lshr i32 %x, %y
+  %c = shl i32 %x, %z
+  %d = or i32 %b, %c
+  ret i32 %d
+}
Index: test/CodeGen/RISCV/setcc.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/setcc.ll
@@ -0,0 +1,91 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
+
+; TODO: check that the generated instructions for the equivalents of seqz,
+; snez, sltz and sgtz map to something simple
+
+define i32 @eq(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: eq:
+; CHECK: xor a0, a0, a1
+; CHECK: sltiu a0, a0, 1
+  %1 = icmp eq i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @ne(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: ne:
+; CHECK: xor a0, a0, a1
+; CHECK: sltu a0, zero, a0
+  %1 = icmp ne i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @ugt(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: ugt:
+; CHECK: sltu a0, a1, a0
+  %1 = icmp ugt i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @uge(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: uge:
+; CHECK: sltu a0, a0, a1
+; CHECK: xori a0, a0, 1
+  %1 = icmp uge i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @ult(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: ult:
+; CHECK: sltu a0, a0, a1
+  %1 = icmp ult i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @ule(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: ule:
+; CHECK: sltu a0, a1, a0
+; CHECK: xori a0, a0, 1
+  %1 = icmp ule i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @sgt(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: sgt:
+; CHECK: slt a0, a1, a0
+  %1 = icmp sgt i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @sge(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: sge:
+; CHECK: slt a0, a0, a1
+; CHECK: xori a0, a0, 1
+  %1 = icmp sge i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @slt(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: slt:
+; CHECK: slt a0, a0, a1
+  %1 = icmp slt i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+define i32 @sle(i32 %a, i32 %b) nounwind {
+; CHECK-LABEL: sle:
+; CHECK: slt a0, a1, a0
+; CHECK: xori a0, a0, 1
+  %1 = icmp sle i32 %a, %b
+  %2 = zext i1 %1 to i32
+  ret i32 %2
+}
+
+; TODO: check variants with an immediate?
Index: test/CodeGen/RISCV/sext-zext-trunc.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/sext-zext-trunc.ll
@@ -0,0 +1,233 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
+
+; FIXME: an unnecessary register is allocated just to store 0. X0 should be
+; used instead
+
+define i8 @sext_i1_to_i8(i1 %a) {
+; TODO: the addi that stores 0 in a1 is unnecessary
+; CHECK-LABEL: sext_i1_to_i8
+; CHECK: andi a0, a0, 1
+; CHECK: addi a1, zero, 0
+; CHECK: sub a0, a1, a0
+  %1 = sext i1 %a to i8
+  ret i8 %1
+}
+
+define i16 @sext_i1_to_i16(i1 %a) {
+; TODO: the addi that stores 0 in a1 is unnecessary
+; CHECK-LABEL: sext_i1_to_i16
+; CHECK: andi a0, a0, 1
+; CHECK: addi a1, zero, 0
+; CHECK: sub a0, a1, a0
+  %1 = sext i1 %a to i16
+  ret i16 %1
+}
+
+define i32 @sext_i1_to_i32(i1 %a) {
+; TODO: the addi that stores 0 in a1 is unnecessary
+; CHECK-LABEL: sext_i1_to_i32
+; CHECK: andi a0, a0, 1
+; CHECK: addi a1, zero, 0
+; CHECK: sub a0, a1, a0
+  %1 = sext i1 %a to i32
+  ret i32 %1
+}
+
+define i64 @sext_i1_to_i64(i1 %a) {
+; TODO: the addi that stores 0 in a1 is unnecessary
+; CHECK-LABEL: sext_i1_to_i64
+; CHECK: andi a0, a0, 1
+; CHECK: addi a1, zero, 0
+; CHECK: sub a0, a1, a0
+  %1 = sext i1 %a to i64
+  ret i64 %1
+}
+
+define i16 @sext_i8_to_i16(i8 %a) {
+; CHECK-LABEL: sext_i8_to_i16
+; CHECK: slli a0, a0, 24
+; CHECK: srai a0, a0, 24
+  %1 = sext i8 %a to i16
+  ret i16 %1
+}
+
+define i32 @sext_i8_to_i32(i8 %a) {
+; CHECK-LABEL: sext_i8_to_i32
+; CHECK: slli a0, a0, 24
+; CHECK: srai a0, a0, 24
+  %1 = sext i8 %a to i32
+  ret i32 %1
+}
+
+define i64 @sext_i8_to_i64(i8 %a) {
+; CHECK-LABEL: sext_i8_to_i64
+; CHECK: slli a1, a0, 24
+; CHECK: srai a0, a1, 24
+; CHECK: srai a1, a1, 31
+  %1 = sext i8 %a to i64
+  ret i64 %1
+}
+
+define i32 @sext_i16_to_i32(i16 %a) {
+; CHECK-LABEL: sext_i16_to_i32
+; CHECK: slli a0, a0, 16
+; CHECK: srai a0, a0, 16
+  %1 = sext i16 %a to i32
+  ret i32 %1
+}
+
+define i64 @sext_i16_to_i64(i16 %a) {
+; CHECK-LABEL: sext_i16_to_i64
+; CHECK: slli a1, a0, 16
+; CHECK: srai a0, a1, 16
+; CHECK: srai a1, a1, 31
+  %1 = sext i16 %a to i64
+  ret i64 %1
+}
+
+define i64 @sext_i32_to_i64(i32 %a) {
+; CHECK-LABEL: sext_i32_to_i64
+; CHECK: srai a1, a0, 31
+  %1 = sext i32 %a to i64
+  ret i64 %1
+}
+
+define i8 @zext_i1_to_i8(i1 %a) {
+; CHECK-LABEL: zext_i1_to_i8
+; CHECK: andi a0, a0, 1
+  %1 = zext i1 %a to i8
+  ret i8 %1
+}
+
+define i16 @zext_i1_to_i16(i1 %a) {
+; CHECK-LABEL: zext_i1_to_i16
+; CHECK: andi a0, a0, 1
+  %1 = zext i1 %a to i16
+  ret i16 %1
+}
+
+define i32 @zext_i1_to_i32(i1 %a) {
+; CHECK-LABEL: zext_i1_to_i32
+; CHECK: andi a0, a0, 1
+  %1 = zext i1 %a to i32
+  ret i32 %1
+}
+
+define i64 @zext_i1_to_i64(i1 %a) {
+; CHECK-LABEL: zext_i1_to_i64
+; CHECK: andi a0, a0, 1
+; CHECK: addi a1, zero, 0
+  %1 = zext i1 %a to i64
+  ret i64 %1
+}
+
+define i16 @zext_i8_to_i16(i8 %a) {
+; CHECK-LABEL: zext_i8_to_i16
+; CHECK: andi a0, a0, 255
+  %1 = zext i8 %a to i16
+  ret i16 %1
+}
+
+define i32 @zext_i8_to_i32(i8 %a) {
+; CHECK-LABEL: zext_i8_to_i32
+; CHECK: andi a0, a0, 255
+  %1 = zext i8 %a to i32
+  ret i32 %1
+}
+
+define i64 @zext_i8_to_i64(i8 %a) {
+; CHECK-LABEL: zext_i8_to_i64
+; CHECK: andi a0, a0, 255
+; CHECK: addi a1, zero, 0
+  %1 = zext i8 %a to i64
+  ret i64 %1
+}
+
+define i32 @zext_i16_to_i32(i16 %a) {
+; CHECK-LABEL: zext_i16_to_i32
+; CHECK: lui a1, 16
+; CHECK: addi a1, a1, -1
+; CHECK: and a0, a0, a1
+  %1 = zext i16 %a to i32
+  ret i32 %1
+}
+
+define i64 @zext_i16_to_i64(i16 %a) {
+; CHECK-LABEL: zext_i16_to_i64
+; CHECK: lui a1, 16
+; CHECK: addi a1, a1, -1
+; CHECK: and a0, a0, a1
+; CHECK: addi a1, zero, 0
+  %1 = zext i16 %a to i64
+  ret i64 %1
+}
+
+define i64 @zext_i32_to_i64(i32 %a) {
+; CHECK-LABEL: zext_i32_to_i64
+; CHECK: addi a1, zero, 0
+  %1 = zext i32 %a to i64
+  ret i64 %1
+}
+
+; TODO: should the trunc tests explicitly ensure no code is generated before
+; jalr?
+
+define i1 @trunc_i8_to_i1(i8 %a) {
+; CHECK-LABEL: trunc_i8_to_i1
+  %1 = trunc i8 %a to i1
+  ret i1 %1
+}
+
+define i1 @trunc_i16_to_i1(i16 %a) {
+; CHECK-LABEL: trunc_i16_to_i1
+  %1 = trunc i16 %a to i1
+  ret i1 %1
+}
+
+define i1 @trunc_i32_to_i1(i32 %a) {
+; CHECK-LABEL: trunc_i32_to_i1
+  %1 = trunc i32 %a to i1
+  ret i1 %1
+}
+
+define i1 @trunc_i64_to_i1(i64 %a) {
+; CHECK-LABEL: trunc_i64_to_i1
+  %1 = trunc i64 %a to i1
+  ret i1 %1
+}
+
+define i8 @trunc_i16_to_i8(i16 %a) {
+; CHECK-LABEL: trunc_i16_to_i8
+  %1 = trunc i16 %a to i8
+  ret i8 %1
+}
+
+define i8 @trunc_i32_to_i8(i32 %a) {
+; CHECK-LABEL: trunc_i32_to_i8
+  %1 = trunc i32 %a to i8
+  ret i8 %1
+}
+
+define i8 @trunc_i64_to_i8(i64 %a) {
+; CHECK-LABEL: trunc_i64_to_i8
+  %1 = trunc i64 %a to i8
+  ret i8 %1
+}
+
+define i16 @trunc_i32_to_i16(i32 %a) {
+; CHECK-LABEL: trunc_i32_to_i16
+  %1 = trunc i32 %a to i16
+  ret i16 %1
+}
+
+define i16 @trunc_i64_to_i16(i64 %a) {
+; CHECK-LABEL: trunc_i64_to_i16
+  %1 = trunc i64 %a to i16
+  ret i16 %1
+}
+
+define i32 @trunc_i64_to_i32(i64 %a) {
+; CHECK-LABEL: trunc_i64_to_i32
+  %1 = trunc i64 %a to i32
+  ret i32 %1
+}
Index: test/CodeGen/RISCV/shifts.ll
===================================================================
--- /dev/null
+++ test/CodeGen/RISCV/shifts.ll
@@ -0,0 +1,28 @@
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
+
+; Basic shift support is tested as part of alu.ll. This file ensures that
+; shifts which may not be supported natively are lowered properly.
+
+define i64 @lshr64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: lshr64:
+; CHECK: lui a3, %hi(__lshrdi3)
+; CHECK: addi a3, a3, %lo(__lshrdi3)
+  %1 = lshr i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @ashr64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: ashr64:
+; CHECK: lui a3, %hi(__ashrdi3)
+; CHECK: addi a3, a3, %lo(__ashrdi3)
+  %1 = ashr i64 %a, %b
+  ret i64 %1
+}
+
+define i64 @shl64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: shl64:
+; CHECK: lui a3, %hi(__ashldi3)
+; CHECK: addi a3, a3, %lo(__ashldi3)
+  %1 = shl i64 %a, %b
+  ret i64 %1
+}