tlively created this revision.
tlively added reviewers: aheejin, dschuff.
Herald added subscribers: llvm-commits, cfe-commits, ecnelises, sunfish, 
hiraditya, jgravelle-google, sbc100.
Herald added projects: clang, LLVM.
tlively requested review of this revision.

Prototype the newly proposed load_lane and store_lane instructions, as specified in
https://github.com/WebAssembly/simd/pull/350. Since these instructions are not
available to origin trial users on Chrome stable, make them opt-in by only
selecting them from intrinsics rather than normal ISel patterns. Since we only
need rough prototypes to measure performance right now, this commit does not
implement all the load and store patterns that would be necessary to make full
use of the offset immediate. However, the full suite of offset tests is included
to make it easy to track improvements in the future.
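
As a rough illustration of the opt-in path, here is a minimal C sketch of how the
prototype builtins are exercised (a wasm32 target with -msimd128 is assumed, and
the vector typedef is an assumption mirroring the one used in
clang/test/CodeGen/builtins-wasm.c):

    typedef signed char i8x16 __attribute__((vector_size(16)));

    // Loads one byte from *p into lane 0 of a v128 via the prototype builtin.
    // The prototype takes only a lane index and a pointer; there is no input
    // vector operand, matching the intrinsic signatures in this patch, and the
    // remaining lanes are unspecified.
    i8x16 load_first_byte(signed char *p) {
      return __builtin_wasm_load8_lane(0, p);
    }

Ordinary scalar loads and lane inserts are intentionally not pattern-matched to
the new instructions, so code that does not call the builtins is unaffected.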

Since these are the first instructions to have both a memarg immediate and an
additional immediate operand, the assembly parser needed some additional hacks
to be able to parse them correctly. Making that code more principled is left as
future work.
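
For illustration, the new text syntax places the memarg before the lane index;
from the updated simd-encodings.s test (the encoding bytes are prefix, opcode,
alignment, offset, lane index):

    # CHECK: v128.load8_lane 32, 1 # encoding: [0xfd,0x58,0x00,0x20,0x01]
    v128.load8_lane 32, 1

Here 32 is the memarg offset and 1 is the lane index; the parser change avoids
emitting a second default-alignment operand for the lane index.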


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D89366

Files:
  clang/include/clang/Basic/BuiltinsWebAssembly.def
  clang/lib/CodeGen/CGBuiltin.cpp
  clang/test/CodeGen/builtins-wasm.c
  llvm/include/llvm/IR/IntrinsicsWebAssembly.td
  llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
  llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
  llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
  llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
  llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll
  llvm/test/MC/WebAssembly/simd-encodings.s

Index: llvm/test/MC/WebAssembly/simd-encodings.s
===================================================================
--- llvm/test/MC/WebAssembly/simd-encodings.s
+++ llvm/test/MC/WebAssembly/simd-encodings.s
@@ -280,6 +280,30 @@
     # CHECK: v128.bitselect # encoding: [0xfd,0x52]
     v128.bitselect
 
+    # CHECK: v128.load8_lane 32, 1 # encoding: [0xfd,0x58,0x00,0x20,0x01]
+    v128.load8_lane 32, 1
+
+    # CHECK: v128.load16_lane 32, 1 # encoding: [0xfd,0x59,0x01,0x20,0x01]
+    v128.load16_lane 32, 1
+
+    # CHECK: v128.load32_lane 32, 1 # encoding: [0xfd,0x5a,0x02,0x20,0x01]
+    v128.load32_lane 32, 1
+
+    # CHECK: v128.load64_lane 32, 1 # encoding: [0xfd,0x5b,0x03,0x20,0x01]
+    v128.load64_lane 32, 1
+
+    # CHECK: v128.store8_lane 32, 1 # encoding: [0xfd,0x5c,0x00,0x20,0x01]
+    v128.store8_lane 32, 1
+
+    # CHECK: v128.store16_lane 32, 1 # encoding: [0xfd,0x5d,0x01,0x20,0x01]
+    v128.store16_lane 32, 1
+
+    # CHECK: v128.store32_lane 32, 1 # encoding: [0xfd,0x5e,0x02,0x20,0x01]
+    v128.store32_lane 32, 1
+
+    # CHECK: v128.store64_lane 32, 1 # encoding: [0xfd,0x5f,0x03,0x20,0x01]
+    v128.store64_lane 32, 1
+
     # CHECK: i8x16.abs # encoding: [0xfd,0x60]
     i8x16.abs
 
Index: llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll
@@ -0,0 +1,933 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -mattr=+simd128 | FileCheck %s
+
+; Test SIMD v128.load{8,16,32,64}_lane instructions. TODO: Use the offset field.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+declare <16 x i8> @llvm.wasm.load8.lane(i32, i8*)
+declare <8 x i16> @llvm.wasm.load16.lane(i32, i16*)
+declare <4 x i32> @llvm.wasm.load32.lane(i32, i32*)
+declare <2 x i64> @llvm.wasm.load64.lane(i32, i64*)
+
+declare void @llvm.wasm.store8.lane(<16 x i8>, i32, i8*)
+declare void @llvm.wasm.store16.lane(<8 x i16>, i32, i16*)
+declare void @llvm.wasm.store32.lane(<4 x i32>, i32, i32*)
+declare void @llvm.wasm.store64.lane(<2 x i64>, i32, i64*)
+
+;===----------------------------------------------------------------------------
+; v128.load8_lane / v128.store8_lane
+;===----------------------------------------------------------------------------
+
+define <16 x i8> @load_lane_i8_no_offset(i8* %p) {
+; CHECK-LABEL: load_lane_i8_no_offset:
+; CHECK:         .functype load_lane_i8_no_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %v = tail call <16 x i8> @llvm.wasm.load8.lane(i32 0, i8* %p)
+  ret <16 x i8> %v
+}
+
+define <16 x i8> @load_lane_i8_with_folded_offset(i8* %p) {
+; CHECK-LABEL: load_lane_i8_with_folded_offset:
+; CHECK:         .functype load_lane_i8_with_folded_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i8* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i32 0, i8* %s)
+  ret <16 x i8> %t
+}
+
+define <16 x i8> @load_lane_i8_with_folded_gep_offset(i8* %p) {
+; CHECK-LABEL: load_lane_i8_with_folded_gep_offset:
+; CHECK:         .functype load_lane_i8_with_folded_gep_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i8, i8* %p, i32 6
+  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i32 0, i8* %s)
+  ret <16 x i8> %t
+}
+
+define <16 x i8> @load_lane_i8_with_unfolded_gep_negative_offset(i8* %p) {
+; CHECK-LABEL: load_lane_i8_with_unfolded_gep_negative_offset:
+; CHECK:         .functype load_lane_i8_with_unfolded_gep_negative_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const -6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i8, i8* %p, i32 -6
+  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i32 0, i8* %s)
+  ret <16 x i8> %t
+}
+
+define <16 x i8> @load_lane_i8_with_unfolded_offset(i8* %p) {
+; CHECK-LABEL: load_lane_i8_with_unfolded_offset:
+; CHECK:         .functype load_lane_i8_with_unfolded_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i8* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i32 0, i8* %s)
+  ret <16 x i8> %t
+}
+
+define <16 x i8> @load_lane_i8_with_unfolded_gep_offset(i8* %p) {
+; CHECK-LABEL: load_lane_i8_with_unfolded_gep_offset:
+; CHECK:         .functype load_lane_i8_with_unfolded_gep_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i8, i8* %p, i32 6
+  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i32 0, i8* %s)
+  ret <16 x i8> %t
+}
+
+define <16 x i8> @load_lane_i8_from_numeric_address() {
+; CHECK-LABEL: load_lane_i8_from_numeric_address:
+; CHECK:         .functype load_lane_i8_from_numeric_address () -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i8*
+  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i32 0, i8* %s)
+  ret <16 x i8> %t
+}
+
+@gv_i8 = global i8 0
+define <16 x i8> @load_lane_i8_from_global_address() {
+; CHECK-LABEL: load_lane_i8_from_global_address:
+; CHECK:         .functype load_lane_i8_from_global_address () -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i8
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i32 0, i8* @gv_i8)
+  ret <16 x i8> %t
+}
+
+define void @store_lane_i8_no_offset(<16 x i8> %v, i8* %p) {
+; CHECK-LABEL: store_lane_i8_no_offset:
+; CHECK:         .functype store_lane_i8_no_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store8.lane(<16 x i8> %v, i32 0, i8* %p)
+  ret void
+}
+
+define void @store_lane_i8_with_folded_offset(<16 x i8> %v, i8* %p) {
+; CHECK-LABEL: store_lane_i8_with_folded_offset:
+; CHECK:         .functype store_lane_i8_with_folded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i8* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  tail call void @llvm.wasm.store8.lane(<16 x i8> %v, i32 0, i8* %s)
+  ret void
+}
+
+define void @store_lane_i8_with_folded_gep_offset(<16 x i8> %v, i8* %p) {
+; CHECK-LABEL: store_lane_i8_with_folded_gep_offset:
+; CHECK:         .functype store_lane_i8_with_folded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i8, i8* %p, i32 6
+  tail call void @llvm.wasm.store8.lane(<16 x i8> %v, i32 0, i8* %s)
+  ret void
+}
+
+define void @store_lane_i8_with_unfolded_gep_negative_offset(<16 x i8> %v, i8* %p) {
+; CHECK-LABEL: store_lane_i8_with_unfolded_gep_negative_offset:
+; CHECK:         .functype store_lane_i8_with_unfolded_gep_negative_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const -6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i8, i8* %p, i32 -6
+  tail call void @llvm.wasm.store8.lane(<16 x i8> %v, i32 0, i8* %s)
+  ret void
+}
+
+define void @store_lane_i8_with_unfolded_offset(<16 x i8> %v, i8* %p) {
+; CHECK-LABEL: store_lane_i8_with_unfolded_offset:
+; CHECK:         .functype store_lane_i8_with_unfolded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i8* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  tail call void @llvm.wasm.store8.lane(<16 x i8> %v, i32 0, i8* %s)
+  ret void
+}
+
+define void @store_lane_i8_with_unfolded_gep_offset(<16 x i8> %v, i8* %p) {
+; CHECK-LABEL: store_lane_i8_with_unfolded_gep_offset:
+; CHECK:         .functype store_lane_i8_with_unfolded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i8, i8* %p, i32 6
+  tail call void @llvm.wasm.store8.lane(<16 x i8> %v, i32 0, i8* %s)
+  ret void
+}
+
+define void @store_lane_i8_to_numeric_address(<16 x i8> %v) {
+; CHECK-LABEL: store_lane_i8_to_numeric_address:
+; CHECK:         .functype store_lane_i8_to_numeric_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i8*
+  tail call void @llvm.wasm.store8.lane(<16 x i8> %v, i32 0, i8* %s)
+  ret void
+}
+
+define void @store_lane_i8_from_global_address(<16 x i8> %v) {
+; CHECK-LABEL: store_lane_i8_from_global_address:
+; CHECK:         .functype store_lane_i8_from_global_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i8
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store8.lane(<16 x i8> %v, i32 0, i8* @gv_i8)
+  ret void
+}
+
+;===----------------------------------------------------------------------------
+; v128.load16_lane / v128.store16_lane
+;===----------------------------------------------------------------------------
+
+define <8 x i16> @load_lane_i16_no_offset(i16* %p) {
+; CHECK-LABEL: load_lane_i16_no_offset:
+; CHECK:         .functype load_lane_i16_no_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %v = tail call <8 x i16> @llvm.wasm.load16.lane(i32 0, i16* %p)
+  ret <8 x i16> %v
+}
+
+define <8 x i16> @load_lane_i16_with_folded_offset(i16* %p) {
+; CHECK-LABEL: load_lane_i16_with_folded_offset:
+; CHECK:         .functype load_lane_i16_with_folded_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i16* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i16*
+  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i32 0, i16* %s)
+  ret <8 x i16> %t
+}
+
+define <8 x i16> @load_lane_i16_with_folded_gep_offset(i16* %p) {
+; CHECK-LABEL: load_lane_i16_with_folded_gep_offset:
+; CHECK:         .functype load_lane_i16_with_folded_gep_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 12
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i16, i16* %p, i32 6
+  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i32 0, i16* %s)
+  ret <8 x i16> %t
+}
+
+define <8 x i16> @load_lane_i16_with_unfolded_gep_negative_offset(i16* %p) {
+; CHECK-LABEL: load_lane_i16_with_unfolded_gep_negative_offset:
+; CHECK:         .functype load_lane_i16_with_unfolded_gep_negative_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const -12
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i16, i16* %p, i32 -6
+  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i32 0, i16* %s)
+  ret <8 x i16> %t
+}
+
+define <8 x i16> @load_lane_i16_with_unfolded_offset(i16* %p) {
+; CHECK-LABEL: load_lane_i16_with_unfolded_offset:
+; CHECK:         .functype load_lane_i16_with_unfolded_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i16* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i16*
+  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i32 0, i16* %s)
+  ret <8 x i16> %t
+}
+
+define <8 x i16> @load_lane_i16_with_unfolded_gep_offset(i16* %p) {
+; CHECK-LABEL: load_lane_i16_with_unfolded_gep_offset:
+; CHECK:         .functype load_lane_i16_with_unfolded_gep_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 12
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i16, i16* %p, i32 6
+  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i32 0, i16* %s)
+  ret <8 x i16> %t
+}
+
+define <8 x i16> @load_lane_i16_from_numeric_address() {
+; CHECK-LABEL: load_lane_i16_from_numeric_address:
+; CHECK:         .functype load_lane_i16_from_numeric_address () -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i16*
+  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i32 0, i16* %s)
+  ret <8 x i16> %t
+}
+
+@gv_i16 = global i16 0
+define <8 x i16> @load_lane_i16_from_global_address() {
+; CHECK-LABEL: load_lane_i16_from_global_address:
+; CHECK:         .functype load_lane_i16_from_global_address () -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i16
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i32 0, i16* @gv_i16)
+  ret <8 x i16> %t
+}
+
+define void @store_lane_i16_no_offset(<8 x i16> %v, i16* %p) {
+; CHECK-LABEL: store_lane_i16_no_offset:
+; CHECK:         .functype store_lane_i16_no_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store16.lane(<8 x i16> %v, i32 0, i16* %p)
+  ret void
+}
+
+define void @store_lane_i16_with_folded_offset(<8 x i16> %v, i16* %p) {
+; CHECK-LABEL: store_lane_i16_with_folded_offset:
+; CHECK:         .functype store_lane_i16_with_folded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i16* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i16*
+  tail call void @llvm.wasm.store16.lane(<8 x i16> %v, i32 0, i16* %s)
+  ret void
+}
+
+define void @store_lane_i16_with_folded_gep_offset(<8 x i16> %v, i16* %p) {
+; CHECK-LABEL: store_lane_i16_with_folded_gep_offset:
+; CHECK:         .functype store_lane_i16_with_folded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 12
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i16, i16* %p, i32 6
+  tail call void @llvm.wasm.store16.lane(<8 x i16> %v, i32 0, i16* %s)
+  ret void
+}
+
+define void @store_lane_i16_with_unfolded_gep_negative_offset(<8 x i16> %v, i16* %p) {
+; CHECK-LABEL: store_lane_i16_with_unfolded_gep_negative_offset:
+; CHECK:         .functype store_lane_i16_with_unfolded_gep_negative_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const -12
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i16, i16* %p, i32 -6
+  tail call void @llvm.wasm.store16.lane(<8 x i16> %v, i32 0, i16* %s)
+  ret void
+}
+
+define void @store_lane_i16_with_unfolded_offset(<8 x i16> %v, i16* %p) {
+; CHECK-LABEL: store_lane_i16_with_unfolded_offset:
+; CHECK:         .functype store_lane_i16_with_unfolded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i16* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i16*
+  tail call void @llvm.wasm.store16.lane(<8 x i16> %v, i32 0, i16* %s)
+  ret void
+}
+
+define void @store_lane_i16_with_unfolded_gep_offset(<8 x i16> %v, i16* %p) {
+; CHECK-LABEL: store_lane_i16_with_unfolded_gep_offset:
+; CHECK:         .functype store_lane_i16_with_unfolded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 12
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i16, i16* %p, i32 6
+  tail call void @llvm.wasm.store16.lane(<8 x i16> %v, i32 0, i16* %s)
+  ret void
+}
+
+define void @store_lane_i16_to_numeric_address(<8 x i16> %v) {
+; CHECK-LABEL: store_lane_i16_to_numeric_address:
+; CHECK:         .functype store_lane_i16_to_numeric_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i16*
+  tail call void @llvm.wasm.store16.lane(<8 x i16> %v, i32 0, i16* %s)
+  ret void
+}
+
+define void @store_lane_i16_from_global_address(<8 x i16> %v) {
+; CHECK-LABEL: store_lane_i16_from_global_address:
+; CHECK:         .functype store_lane_i16_from_global_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i16
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store16.lane(<8 x i16> %v, i32 0, i16* @gv_i16)
+  ret void
+}
+
+;===----------------------------------------------------------------------------
+; v128.load32_lane / v128.store32_lane
+;===----------------------------------------------------------------------------
+
+define <4 x i32> @load_lane_i32_no_offset(i32* %p) {
+; CHECK-LABEL: load_lane_i32_no_offset:
+; CHECK:         .functype load_lane_i32_no_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %v = tail call <4 x i32> @llvm.wasm.load32.lane(i32 0, i32* %p)
+  ret <4 x i32> %v
+}
+
+define <4 x i32> @load_lane_i32_with_folded_offset(i32* %p) {
+; CHECK-LABEL: load_lane_i32_with_folded_offset:
+; CHECK:         .functype load_lane_i32_with_folded_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i32* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32 0, i32* %s)
+  ret <4 x i32> %t
+}
+
+define <4 x i32> @load_lane_i32_with_folded_gep_offset(i32* %p) {
+; CHECK-LABEL: load_lane_i32_with_folded_gep_offset:
+; CHECK:         .functype load_lane_i32_with_folded_gep_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i32, i32* %p, i32 6
+  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32 0, i32* %s)
+  ret <4 x i32> %t
+}
+
+define <4 x i32> @load_lane_i32_with_unfolded_gep_negative_offset(i32* %p) {
+; CHECK-LABEL: load_lane_i32_with_unfolded_gep_negative_offset:
+; CHECK:         .functype load_lane_i32_with_unfolded_gep_negative_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const -24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i32, i32* %p, i32 -6
+  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32 0, i32* %s)
+  ret <4 x i32> %t
+}
+
+define <4 x i32> @load_lane_i32_with_unfolded_offset(i32* %p) {
+; CHECK-LABEL: load_lane_i32_with_unfolded_offset:
+; CHECK:         .functype load_lane_i32_with_unfolded_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i32* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32 0, i32* %s)
+  ret <4 x i32> %t
+}
+
+define <4 x i32> @load_lane_i32_with_unfolded_gep_offset(i32* %p) {
+; CHECK-LABEL: load_lane_i32_with_unfolded_gep_offset:
+; CHECK:         .functype load_lane_i32_with_unfolded_gep_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i32, i32* %p, i32 6
+  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32 0, i32* %s)
+  ret <4 x i32> %t
+}
+
+define <4 x i32> @load_lane_i32_from_numeric_address() {
+; CHECK-LABEL: load_lane_i32_from_numeric_address:
+; CHECK:         .functype load_lane_i32_from_numeric_address () -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i32*
+  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32 0, i32* %s)
+  ret <4 x i32> %t
+}
+
+@gv_i32 = global i32 0
+define <4 x i32> @load_lane_i32_from_global_address() {
+; CHECK-LABEL: load_lane_i32_from_global_address:
+; CHECK:         .functype load_lane_i32_from_global_address () -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i32
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32 0, i32* @gv_i32)
+  ret <4 x i32> %t
+}
+
+define void @store_lane_i32_no_offset(<4 x i32> %v, i32* %p) {
+; CHECK-LABEL: store_lane_i32_no_offset:
+; CHECK:         .functype store_lane_i32_no_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store32.lane(<4 x i32> %v, i32 0, i32* %p)
+  ret void
+}
+
+define void @store_lane_i32_with_folded_offset(<4 x i32> %v, i32* %p) {
+; CHECK-LABEL: store_lane_i32_with_folded_offset:
+; CHECK:         .functype store_lane_i32_with_folded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i32* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  tail call void @llvm.wasm.store32.lane(<4 x i32> %v, i32 0, i32* %s)
+  ret void
+}
+
+define void @store_lane_i32_with_folded_gep_offset(<4 x i32> %v, i32* %p) {
+; CHECK-LABEL: store_lane_i32_with_folded_gep_offset:
+; CHECK:         .functype store_lane_i32_with_folded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i32, i32* %p, i32 6
+  tail call void @llvm.wasm.store32.lane(<4 x i32> %v, i32 0, i32* %s)
+  ret void
+}
+
+define void @store_lane_i32_with_unfolded_gep_negative_offset(<4 x i32> %v, i32* %p) {
+; CHECK-LABEL: store_lane_i32_with_unfolded_gep_negative_offset:
+; CHECK:         .functype store_lane_i32_with_unfolded_gep_negative_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const -24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i32, i32* %p, i32 -6
+  tail call void @llvm.wasm.store32.lane(<4 x i32> %v, i32 0, i32* %s)
+  ret void
+}
+
+define void @store_lane_i32_with_unfolded_offset(<4 x i32> %v, i32* %p) {
+; CHECK-LABEL: store_lane_i32_with_unfolded_offset:
+; CHECK:         .functype store_lane_i32_with_unfolded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i32* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  tail call void @llvm.wasm.store32.lane(<4 x i32> %v, i32 0, i32* %s)
+  ret void
+}
+
+define void @store_lane_i32_with_unfolded_gep_offset(<4 x i32> %v, i32* %p) {
+; CHECK-LABEL: store_lane_i32_with_unfolded_gep_offset:
+; CHECK:         .functype store_lane_i32_with_unfolded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i32, i32* %p, i32 6
+  tail call void @llvm.wasm.store32.lane(<4 x i32> %v, i32 0, i32* %s)
+  ret void
+}
+
+define void @store_lane_i32_to_numeric_address(<4 x i32> %v) {
+; CHECK-LABEL: store_lane_i32_to_numeric_address:
+; CHECK:         .functype store_lane_i32_to_numeric_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i32*
+  tail call void @llvm.wasm.store32.lane(<4 x i32> %v, i32 0, i32* %s)
+  ret void
+}
+
+define void @store_lane_i32_from_global_address(<4 x i32> %v) {
+; CHECK-LABEL: store_lane_i32_from_global_address:
+; CHECK:         .functype store_lane_i32_from_global_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i32
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store32.lane(<4 x i32> %v, i32 0, i32* @gv_i32)
+  ret void
+}
+
+;===----------------------------------------------------------------------------
+; v128.load64_lane / v128.store64_lane
+;===----------------------------------------------------------------------------
+
+define <2 x i64> @load_lane_i64_no_offset(i64* %p) {
+; CHECK-LABEL: load_lane_i64_no_offset:
+; CHECK:         .functype load_lane_i64_no_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %v = tail call <2 x i64> @llvm.wasm.load64.lane(i32 0, i64* %p)
+  ret <2 x i64> %v
+}
+
+define <2 x i64> @load_lane_i64_with_folded_offset(i64* %p) {
+; CHECK-LABEL: load_lane_i64_with_folded_offset:
+; CHECK:         .functype load_lane_i64_with_folded_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i64* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i64*
+  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i32 0, i64* %s)
+  ret <2 x i64> %t
+}
+
+define <2 x i64> @load_lane_i64_with_folded_gep_offset(i64* %p) {
+; CHECK-LABEL: load_lane_i64_with_folded_gep_offset:
+; CHECK:         .functype load_lane_i64_with_folded_gep_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 48
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i64, i64* %p, i32 6
+  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i32 0, i64* %s)
+  ret <2 x i64> %t
+}
+
+define <2 x i64> @load_lane_i64_with_unfolded_gep_negative_offset(i64* %p) {
+; CHECK-LABEL: load_lane_i64_with_unfolded_gep_negative_offset:
+; CHECK:         .functype load_lane_i64_with_unfolded_gep_negative_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const -48
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i64, i64* %p, i32 -6
+  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i32 0, i64* %s)
+  ret <2 x i64> %t
+}
+
+define <2 x i64> @load_lane_i64_with_unfolded_offset(i64* %p) {
+; CHECK-LABEL: load_lane_i64_with_unfolded_offset:
+; CHECK:         .functype load_lane_i64_with_unfolded_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i64* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i64*
+  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i32 0, i64* %s)
+  ret <2 x i64> %t
+}
+
+define <2 x i64> @load_lane_i64_with_unfolded_gep_offset(i64* %p) {
+; CHECK-LABEL: load_lane_i64_with_unfolded_gep_offset:
+; CHECK:         .functype load_lane_i64_with_unfolded_gep_offset (i32) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 48
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i64, i64* %p, i32 6
+  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i32 0, i64* %s)
+  ret <2 x i64> %t
+}
+
+define <2 x i64> @load_lane_i64_from_numeric_address() {
+; CHECK-LABEL: load_lane_i64_from_numeric_address:
+; CHECK:         .functype load_lane_i64_from_numeric_address () -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i64*
+  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i32 0, i64* %s)
+  ret <2 x i64> %t
+}
+
+@gv_i64 = global i64 0
+define <2 x i64> @load_lane_i64_from_global_address() {
+; CHECK-LABEL: load_lane_i64_from_global_address:
+; CHECK:         .functype load_lane_i64_from_global_address () -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i64
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i32 0, i64* @gv_i64)
+  ret <2 x i64> %t
+}
+
+define void @store_lane_i64_no_offset(<2 x i64> %v, i64* %p) {
+; CHECK-LABEL: store_lane_i64_no_offset:
+; CHECK:         .functype store_lane_i64_no_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store64.lane(<2 x i64> %v, i32 0, i64* %p)
+  ret void
+}
+
+define void @store_lane_i64_with_folded_offset(<2 x i64> %v, i64* %p) {
+; CHECK-LABEL: store_lane_i64_with_folded_offset:
+; CHECK:         .functype store_lane_i64_with_folded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i64* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i64*
+  tail call void @llvm.wasm.store64.lane(<2 x i64> %v, i32 0, i64* %s)
+  ret void
+}
+
+define void @store_lane_i64_with_folded_gep_offset(<2 x i64> %v, i64* %p) {
+; CHECK-LABEL: store_lane_i64_with_folded_gep_offset:
+; CHECK:         .functype store_lane_i64_with_folded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 48
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i64, i64* %p, i32 6
+  tail call void @llvm.wasm.store64.lane(<2 x i64> %v, i32 0, i64* %s)
+  ret void
+}
+
+define void @store_lane_i64_with_unfolded_gep_negative_offset(<2 x i64> %v, i64* %p) {
+; CHECK-LABEL: store_lane_i64_with_unfolded_gep_negative_offset:
+; CHECK:         .functype store_lane_i64_with_unfolded_gep_negative_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const -48
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i64, i64* %p, i32 -6
+  tail call void @llvm.wasm.store64.lane(<2 x i64> %v, i32 0, i64* %s)
+  ret void
+}
+
+define void @store_lane_i64_with_unfolded_offset(<2 x i64> %v, i64* %p) {
+; CHECK-LABEL: store_lane_i64_with_unfolded_offset:
+; CHECK:         .functype store_lane_i64_with_unfolded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i64* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i64*
+  tail call void @llvm.wasm.store64.lane(<2 x i64> %v, i32 0, i64* %s)
+  ret void
+}
+
+define void @store_lane_i64_with_unfolded_gep_offset(<2 x i64> %v, i64* %p) {
+; CHECK-LABEL: store_lane_i64_with_unfolded_gep_offset:
+; CHECK:         .functype store_lane_i64_with_unfolded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 48
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i64, i64* %p, i32 6
+  tail call void @llvm.wasm.store64.lane(<2 x i64> %v, i32 0, i64* %s)
+  ret void
+}
+
+define void @store_lane_i64_to_numeric_address(<2 x i64> %v) {
+; CHECK-LABEL: store_lane_i64_to_numeric_address:
+; CHECK:         .functype store_lane_i64_to_numeric_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i64*
+  tail call void @llvm.wasm.store64.lane(<2 x i64> %v, i32 0, i64* %s)
+  ret void
+}
+
+define void @store_lane_i64_from_global_address(<2 x i64> %v) {
+; CHECK-LABEL: store_lane_i64_from_global_address:
+; CHECK:         .functype store_lane_i64_from_global_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i64
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store64.lane(<2 x i64> %v, i32 0, i64* @gv_i64)
+  ret void
+}
Index: llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
===================================================================
--- llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
+++ llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
@@ -53,7 +53,7 @@
          "v128.load\t$off$p2align", 0>;
 }
 
-// Def load and store patterns from WebAssemblyInstrMemory.td for vector types
+// Def load patterns from WebAssemblyInstrMemory.td for vector types
 foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in {
 defm : LoadPatNoOffset<vec_t, load, "LOAD_V128">;
 defm : LoadPatImmOff<vec_t, load, regPlusImm, "LOAD_V128">;
@@ -201,6 +201,51 @@
 defm : LoadPatGlobalAddrOffOnly<v4i32, int_wasm_load32_zero, "LOAD_ZERO_v4i32">;
 defm : LoadPatGlobalAddrOffOnly<v2i64, int_wasm_load64_zero, "LOAD_ZERO_v2i64">;
 
+// Load lane
+multiclass SIMDLoadLane<ValueType vec_t, string name, bits<32> simdop> {
+  let mayLoad = 1, UseNamedOperandTable = 1 in {
+  defm LOAD_LANE_#vec_t#_A32 :
+    SIMD_I<(outs V128:$dst),
+           (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx,
+                I32:$addr),
+           (outs), (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx),
+           [], name#"\t$dst, ${off}(${addr})$p2align, $idx",
+           name#"\t$off$p2align, $idx", simdop>;
+  defm LOAD_LANE_#vec_t#_A64 :
+    SIMD_I<(outs V128:$dst),
+           (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx,
+                I64:$addr),
+           (outs), (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx),
+           [], name#"\t$dst, ${off}(${addr})$p2align, $idx",
+           name#"\t$off$p2align, $idx", simdop>;
+  } // mayLoad = 1, UseNamedOperandTable = 1
+}
+
+// TODO: Also support v4f32 and v2f64 once the instructions are merged
+// to the proposal
+defm "" : SIMDLoadLane<v16i8, "v128.load8_lane", 88>;
+defm "" : SIMDLoadLane<v8i16, "v128.load16_lane", 89>;
+defm "" : SIMDLoadLane<v4i32, "v128.load32_lane", 90>;
+defm "" : SIMDLoadLane<v2i64, "v128.load64_lane", 91>;
+
+// Select loads with no constant offset.
+multiclass LoadLanePatNoOffset<ValueType ty, PatFrag kind, ImmLeaf lane_imm_t> {
+  def : Pat<(ty (kind (i32 lane_imm_t:$idx), (i32 I32:$addr))),
+            (!cast<NI>("LOAD_LANE_"#ty#"_A32") 0, 0, imm:$idx, I32:$addr)>,
+        Requires<[HasAddr32]>;
+  def : Pat<(ty (kind (i32 lane_imm_t:$idx), (i64 I64:$addr))),
+            (!cast<NI>("LOAD_LANE_"#ty#"_A64") 0, 0, imm:$idx, I64:$addr)>,
+        Requires<[HasAddr64]>;
+}
+
+defm : LoadLanePatNoOffset<v16i8, int_wasm_load8_lane, LaneIdx16>;
+defm : LoadLanePatNoOffset<v8i16, int_wasm_load16_lane, LaneIdx8>;
+defm : LoadLanePatNoOffset<v4i32, int_wasm_load32_lane, LaneIdx4>;
+defm : LoadLanePatNoOffset<v2i64, int_wasm_load64_lane, LaneIdx2>;
+
+// TODO: Also support the other load patterns for load_lane once the instructions
+// are merged to the proposal.
+
 // Store: v128.store
 let mayStore = 1, UseNamedOperandTable = 1 in {
 defm STORE_V128_A32 :
@@ -214,8 +259,9 @@
          "v128.store\t${off}(${addr})$p2align, $vec",
          "v128.store\t$off$p2align", 11>;
 }
+
+// Def store patterns from WebAssemblyInstrMemory.td for vector types
 foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in {
-// Def load and store patterns from WebAssemblyInstrMemory.td for vector types
 defm : StorePatNoOffset<vec_t, store, "STORE_V128">;
 defm : StorePatImmOff<vec_t, store, regPlusImm, "STORE_V128">;
 defm : StorePatImmOff<vec_t, store, or_is_add, "STORE_V128">;
@@ -223,6 +269,50 @@
 defm : StorePatGlobalAddrOffOnly<vec_t, store, "STORE_V128">;
 }
 
+// Store lane
+multiclass SIMDStoreLane<ValueType vec_t, string name, bits<32> simdop> {
+  let mayStore = 1, UseNamedOperandTable = 1 in {
+  defm STORE_LANE_#vec_t#_A32 :
+    SIMD_I<(outs),
+           (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx,
+                I32:$addr, V128:$vec),
+           (outs), (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx),
+           [], name#"\t${off}(${addr})$p2align, $idx, $vec",
+           name#"\t$off$p2align, $idx", simdop>;
+  defm STORE_LANE_#vec_t#_A64 :
+    SIMD_I<(outs),
+           (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx,
+                I64:$addr, V128:$vec),
+           (outs), (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx),
+           [], name#"\t${off}(${addr})$p2align, $idx, $vec",
+           name#"\t$off$p2align, $idx", simdop>;
+  } // mayStore = 1, UseNamedOperandTable = 1
+}
+
+// TODO: Also support v4f32 and v2f64 once the instructions are merged
+// to the proposal
+defm "" : SIMDStoreLane<v16i8, "v128.store8_lane", 92>;
+defm "" : SIMDStoreLane<v8i16, "v128.store16_lane", 93>;
+defm "" : SIMDStoreLane<v4i32, "v128.store32_lane", 94>;
+defm "" : SIMDStoreLane<v2i64, "v128.store64_lane", 95>;
+
+// Select stores with no constant offset.
+multiclass StoreLanePatNoOffset<ValueType ty, PatFrag kind, ImmLeaf lane_imm_t> {
+  def : Pat<(kind (ty V128:$vec), (i32 lane_imm_t:$idx), (i32 I32:$addr)),
+            (!cast<NI>("STORE_LANE_"#ty#"_A32")
+              0, 0, imm:$idx, I32:$addr, ty:$vec)>,
+        Requires<[HasAddr32]>;
+  def : Pat<(kind (ty V128:$vec), (i32 lane_imm_t:$idx), (i64 I64:$addr)),
+            (!cast<NI>("STORE_LANE_"#ty#"_A64")
+              0, 0, imm:$idx, I64:$addr, ty:$vec)>,
+        Requires<[HasAddr64]>;
+}
+
+defm : StoreLanePatNoOffset<v16i8, int_wasm_store8_lane, LaneIdx16>;
+defm : StoreLanePatNoOffset<v8i16, int_wasm_store16_lane, LaneIdx8>;
+defm : StoreLanePatNoOffset<v4i32, int_wasm_store32_lane, LaneIdx4>;
+defm : StoreLanePatNoOffset<v2i64, int_wasm_store64_lane, LaneIdx2>;
+
 //===----------------------------------------------------------------------===//
 // Constructing SIMD values
 //===----------------------------------------------------------------------===//
Index: llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
===================================================================
--- llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -685,6 +685,57 @@
     Info.align = Info.memVT == MVT::i32 ? Align(4) : Align(8);
     Info.flags = MachineMemOperand::MOLoad;
     return true;
+  case Intrinsic::wasm_load8_lane:
+  case Intrinsic::wasm_load16_lane:
+  case Intrinsic::wasm_load32_lane:
+  case Intrinsic::wasm_load64_lane:
+  case Intrinsic::wasm_store8_lane:
+  case Intrinsic::wasm_store16_lane:
+  case Intrinsic::wasm_store32_lane:
+  case Intrinsic::wasm_store64_lane: {
+    MVT MemVT;
+    Align MemAlign;
+    switch (Intrinsic) {
+    case Intrinsic::wasm_load8_lane:
+    case Intrinsic::wasm_store8_lane:
+      MemVT = MVT::i8;
+      MemAlign = Align(1);
+      break;
+    case Intrinsic::wasm_load16_lane:
+    case Intrinsic::wasm_store16_lane:
+      MemVT = MVT::i16;
+      MemAlign = Align(2);
+      break;
+    case Intrinsic::wasm_load32_lane:
+    case Intrinsic::wasm_store32_lane:
+      MemVT = MVT::i32;
+      MemAlign = Align(4);
+      break;
+    case Intrinsic::wasm_load64_lane:
+    case Intrinsic::wasm_store64_lane:
+      MemVT = MVT::i64;
+      MemAlign = Align(8);
+      break;
+    default:
+      llvm_unreachable("unexpected intrinsic");
+    }
+    if (Intrinsic == Intrinsic::wasm_load8_lane ||
+        Intrinsic == Intrinsic::wasm_load16_lane ||
+        Intrinsic == Intrinsic::wasm_load32_lane ||
+        Intrinsic == Intrinsic::wasm_load64_lane) {
+      Info.opc = ISD::INTRINSIC_W_CHAIN;
+      Info.flags = MachineMemOperand::MOLoad;
+      Info.ptrVal = I.getArgOperand(1);
+    } else {
+      Info.opc = ISD::INTRINSIC_VOID;
+      Info.flags = MachineMemOperand::MOStore;
+      Info.ptrVal = I.getArgOperand(2);
+    }
+    Info.memVT = MemVT;
+    Info.offset = 0;
+    Info.align = MemAlign;
+    return true;
+  }
   default:
     return false;
   }
Index: llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
===================================================================
--- llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
+++ llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
@@ -177,6 +177,8 @@
   WASM_LOAD_STORE(ATOMIC_RMW8_U_CMPXCHG_I32)
   WASM_LOAD_STORE(ATOMIC_RMW8_U_CMPXCHG_I64)
   WASM_LOAD_STORE(LOAD_SPLAT_v8x16)
+  WASM_LOAD_STORE(LOAD_LANE_v16i8)
+  WASM_LOAD_STORE(STORE_LANE_v16i8)
     return 0;
   WASM_LOAD_STORE(LOAD16_S_I32)
   WASM_LOAD_STORE(LOAD16_U_I32)
@@ -203,6 +205,8 @@
   WASM_LOAD_STORE(ATOMIC_RMW16_U_CMPXCHG_I32)
   WASM_LOAD_STORE(ATOMIC_RMW16_U_CMPXCHG_I64)
   WASM_LOAD_STORE(LOAD_SPLAT_v16x8)
+  WASM_LOAD_STORE(LOAD_LANE_v8i16)
+  WASM_LOAD_STORE(STORE_LANE_v8i16)
     return 1;
   WASM_LOAD_STORE(LOAD_I32)
   WASM_LOAD_STORE(LOAD_F32)
@@ -233,6 +237,8 @@
   WASM_LOAD_STORE(ATOMIC_WAIT_I32)
   WASM_LOAD_STORE(LOAD_SPLAT_v32x4)
   WASM_LOAD_STORE(LOAD_ZERO_v4i32)
+  WASM_LOAD_STORE(LOAD_LANE_v4i32)
+  WASM_LOAD_STORE(STORE_LANE_v4i32)
     return 2;
   WASM_LOAD_STORE(LOAD_I64)
   WASM_LOAD_STORE(LOAD_F64)
@@ -256,6 +262,8 @@
   WASM_LOAD_STORE(LOAD_EXTEND_S_v2i64)
   WASM_LOAD_STORE(LOAD_EXTEND_U_v2i64)
   WASM_LOAD_STORE(LOAD_ZERO_v2i64)
+  WASM_LOAD_STORE(LOAD_LANE_v2i64)
+  WASM_LOAD_STORE(STORE_LANE_v2i64)
     return 3;
   WASM_LOAD_STORE(LOAD_V128)
   WASM_LOAD_STORE(STORE_V128)
Index: llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
===================================================================
--- llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
+++ llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
@@ -419,6 +419,12 @@
           return error("Expected integer constant");
         parseSingleInteger(false, Operands);
       } else {
+        // v128.{load,store}{8,16,32,64}_lane has both a memarg and a lane
+        // index. We need to avoid parsing an extra alignment operand for the
+        // lane index.
+        auto IsLoadStoreLane = InstName.find("_lane") != StringRef::npos;
+        if (IsLoadStoreLane && Operands.size() == 4)
+          return false;
         // Alignment not specified (or atomics, must use default alignment).
         // We can't just call WebAssembly::GetDefaultP2Align since we don't have
         // an opcode until after the assembly matcher, so set a default to fix
Index: llvm/include/llvm/IR/IntrinsicsWebAssembly.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsWebAssembly.td
+++ llvm/include/llvm/IR/IntrinsicsWebAssembly.td
@@ -208,6 +208,52 @@
             [IntrReadMem, IntrArgMemOnly],
              "", [SDNPMemOperand]>;
 
+// These intrinsics do not mark their lane index arguments as immediate because
+// that changes the corresponding SDNode from ISD::Constant to
+// ISD::TargetConstant, which would require extra complications in the ISel
+// tablegen patterns. TODO: Replace these intrinsics with normal ISel patterns
+// once the load_lane instructions are merged to the proposal.
+def int_wasm_load8_lane :
+  Intrinsic<[llvm_v16i8_ty],
+            [llvm_i32_ty, LLVMPointerType<llvm_i8_ty>],
+            [IntrReadMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_load16_lane :
+  Intrinsic<[llvm_v8i16_ty],
+            [llvm_i32_ty, LLVMPointerType<llvm_i16_ty>],
+            [IntrReadMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_load32_lane :
+  Intrinsic<[llvm_v4i32_ty],
+            [llvm_i32_ty, LLVMPointerType<llvm_i32_ty>],
+            [IntrReadMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_load64_lane :
+  Intrinsic<[llvm_v2i64_ty],
+            [llvm_i32_ty, LLVMPointerType<llvm_i64_ty>],
+            [IntrReadMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_store8_lane :
+  Intrinsic<[],
+            [llvm_v16i8_ty, llvm_i32_ty, LLVMPointerType<llvm_i8_ty>],
+            [IntrWriteMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_store16_lane :
+  Intrinsic<[],
+            [llvm_v8i16_ty, llvm_i32_ty, LLVMPointerType<llvm_i16_ty>],
+            [IntrWriteMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_store32_lane :
+  Intrinsic<[],
+            [llvm_v4i32_ty, llvm_i32_ty, LLVMPointerType<llvm_i32_ty>],
+            [IntrWriteMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_store64_lane :
+  Intrinsic<[],
+            [llvm_v2i64_ty, llvm_i32_ty, LLVMPointerType<llvm_i64_ty>],
+            [IntrWriteMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+
 //===----------------------------------------------------------------------===//
 // Thread-local storage intrinsics
 //===----------------------------------------------------------------------===//
Index: clang/test/CodeGen/builtins-wasm.c
===================================================================
--- clang/test/CodeGen/builtins-wasm.c
+++ clang/test/CodeGen/builtins-wasm.c
@@ -284,6 +284,62 @@
   // WEBASSEMBLY-NEXT: ret
 }
 
+i8x16 load8_lane(signed char *p) {
+  return __builtin_wasm_load8_lane(0, p);
+  // WEBASSEMBLY: call <16 x i8> @llvm.wasm.load8.lane(
+  // WEBASSEMBLY-SAME: i32 0, i8* %p)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+i16x8 load16_lane(short *p) {
+  return __builtin_wasm_load16_lane(0, p);
+  // WEBASSEMBLY: call <8 x i16> @llvm.wasm.load16.lane(
+  // WEBASSEMBLY-SAME: i32 0, i16* %p)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+i32x4 load32_lane(int *p) {
+  return __builtin_wasm_load32_lane(0, p);
+  // WEBASSEMBLY: call <4 x i32> @llvm.wasm.load32.lane(
+  // WEBASSEMBLY-SAME: i32 0, i32* %p)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+i64x2 load64_lane(long long *p) {
+  return __builtin_wasm_load64_lane(0, p);
+  // WEBASSEMBLY: call <2 x i64> @llvm.wasm.load64.lane(
+  // WEBASSEMBLY-SAME: i32 0, i64* %p)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+void store8_lane(i8x16 v, signed char *p) {
+  return __builtin_wasm_store8_lane(v, 0, p);
+  // WEBASSEMBLY: call void @llvm.wasm.store8.lane(
+  // WEBASSEMBLY-SAME: <16 x i8> %v, i32 0, i8* %p)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+void store16_lane(i16x8 v, short *p) {
+  return __builtin_wasm_store16_lane(v, 0, p);
+  // WEBASSEMBLY: call void @llvm.wasm.store16.lane(
+  // WEBASSEMBLY-SAME: <8 x i16> %v, i32 0, i16* %p)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+void store32_lane(i32x4 v, int *p) {
+  return __builtin_wasm_store32_lane(v, 0, p);
+  // WEBASSEMBLY: call void @llvm.wasm.store32.lane(
+  // WEBASSEMBLY-SAME: <4 x i32> %v, i32 0, i32* %p)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+void store64_lane(i64x2 v, long long *p) {
+  return __builtin_wasm_store64_lane(v, 0, p);
+  // WEBASSEMBLY: call void @llvm.wasm.store64.lane(
+  // WEBASSEMBLY-SAME: <2 x i64> %v, i32 0, i64* %p)
+  // WEBASSEMBLY-NEXT: ret
+}
+
 i8x16 add_saturate_s_i8x16(i8x16 x, i8x16 y) {
   return __builtin_wasm_add_saturate_s_i8x16(x, y);
   // WEBASSEMBLY: call <16 x i8> @llvm.sadd.sat.v16i8(
Index: clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- clang/lib/CodeGen/CGBuiltin.cpp
+++ clang/lib/CodeGen/CGBuiltin.cpp
@@ -16721,6 +16721,65 @@
     Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load64_zero);
     return Builder.CreateCall(Callee, {Ptr});
   }
+  case WebAssembly::BI__builtin_wasm_load8_lane:
+  case WebAssembly::BI__builtin_wasm_load16_lane:
+  case WebAssembly::BI__builtin_wasm_load32_lane:
+  case WebAssembly::BI__builtin_wasm_load64_lane: {
+    Optional<llvm::APSInt> LaneIdxConst =
+        E->getArg(0)->getIntegerConstantExpr(getContext());
+    assert(LaneIdxConst && "Constant arg isn't actually constant?");
+    Value *LaneIdx = llvm::ConstantInt::get(getLLVMContext(), *LaneIdxConst);
+    Value *Ptr = EmitScalarExpr(E->getArg(1));
+    unsigned IntNo;
+    switch (BuiltinID) {
+    case WebAssembly::BI__builtin_wasm_load8_lane:
+      IntNo = Intrinsic::wasm_load8_lane;
+      break;
+    case WebAssembly::BI__builtin_wasm_load16_lane:
+      IntNo = Intrinsic::wasm_load16_lane;
+      break;
+    case WebAssembly::BI__builtin_wasm_load32_lane:
+      IntNo = Intrinsic::wasm_load32_lane;
+      break;
+    case WebAssembly::BI__builtin_wasm_load64_lane:
+      IntNo = Intrinsic::wasm_load64_lane;
+      break;
+    default:
+      llvm_unreachable("unexpected builtin ID");
+    }
+    Function *Callee = CGM.getIntrinsic(IntNo);
+    return Builder.CreateCall(Callee, {LaneIdx, Ptr});
+  }
+  case WebAssembly::BI__builtin_wasm_store8_lane:
+  case WebAssembly::BI__builtin_wasm_store16_lane:
+  case WebAssembly::BI__builtin_wasm_store32_lane:
+  case WebAssembly::BI__builtin_wasm_store64_lane: {
+    Value *Vec = EmitScalarExpr(E->getArg(0));
+    Optional<llvm::APSInt> LaneIdxConst =
+        E->getArg(1)->getIntegerConstantExpr(getContext());
+    assert(LaneIdxConst && "Constant arg isn't actually constant?");
+    Value *LaneIdx = llvm::ConstantInt::get(getLLVMContext(), *LaneIdxConst);
+    Value *Ptr = EmitScalarExpr(E->getArg(2));
+    unsigned IntNo;
+    switch (BuiltinID) {
+    case WebAssembly::BI__builtin_wasm_store8_lane:
+      IntNo = Intrinsic::wasm_store8_lane;
+      break;
+    case WebAssembly::BI__builtin_wasm_store16_lane:
+      IntNo = Intrinsic::wasm_store16_lane;
+      break;
+    case WebAssembly::BI__builtin_wasm_store32_lane:
+      IntNo = Intrinsic::wasm_store32_lane;
+      break;
+    case WebAssembly::BI__builtin_wasm_store64_lane:
+      IntNo = Intrinsic::wasm_store64_lane;
+      break;
+    default:
+      llvm_unreachable("unexpected builtin ID");
+    }
+    Function *Callee = CGM.getIntrinsic(IntNo);
+    return Builder.CreateCall(Callee, {Vec, LaneIdx, Ptr});
+  }
   case WebAssembly::BI__builtin_wasm_shuffle_v8x16: {
     Value *Ops[18];
     size_t OpIdx = 0;
Index: clang/include/clang/Basic/BuiltinsWebAssembly.def
===================================================================
--- clang/include/clang/Basic/BuiltinsWebAssembly.def
+++ clang/include/clang/Basic/BuiltinsWebAssembly.def
@@ -174,5 +174,14 @@
 TARGET_BUILTIN(__builtin_wasm_load32_zero, "V4ii*", "nU", "simd128")
 TARGET_BUILTIN(__builtin_wasm_load64_zero, "V2LLiLLi*", "nU", "simd128")
 
+TARGET_BUILTIN(__builtin_wasm_load8_lane, "V16ScIiSc*", "nU", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load16_lane, "V8sIis*", "nU", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load32_lane, "V4iIii*", "nU", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load64_lane, "V2LLiIiLLi*", "nU", "simd128")
+TARGET_BUILTIN(__builtin_wasm_store8_lane, "vV16ScIiSc*", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_store16_lane, "vV8sIis*", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_store32_lane, "vV4iIii*", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_store64_lane, "vV2LLiIiLLi*", "n", "simd128")
+
 #undef BUILTIN
 #undef TARGET_BUILTIN