Enable the generalized fetch JIT with an 8- or 16-wide SIMD target. Some
work is still needed to remove the remaining simd8 double pumping for the
16-wide target.

Also remove the unused non-gather vertex load path (JitLoadVertices).
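
For reference, the width handling in FetchJit::Create now has roughly
this shape (a simplified sketch of the code in the diff below, not a
verbatim excerpt):

    uint32_t baseWidth = mVWidth;
    SWR_ASSERT(mVWidth == 8 || mVWidth == 16, "Unsupported vector width %d", mVWidth);
    #if USE_SIMD16_SHADERS
    SetTargetWidth(16);         // builder mSimd* types become 16-wide
    #endif

    // ... emit width-agnostic gather IR; the per-width shuffle helpers
    // are selected with a runtime width check, e.g.:
    mVWidth == 16 ? Shuffle8bpcGatherd16(args) : Shuffle8bpcGatherd(args);

    #if USE_SIMD16_SHADERS
    SetTargetWidth(baseWidth);  // restore the native width before returning
    #endif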
---
 .../drivers/swr/rasterizer/jitter/builder.cpp      |   26 +-
 .../drivers/swr/rasterizer/jitter/builder.h        |    4 +-
 .../drivers/swr/rasterizer/jitter/builder_mem.cpp  |   69 +-
 .../drivers/swr/rasterizer/jitter/fetch_jit.cpp    | 1197 +++-----------------
 .../drivers/swr/rasterizer/jitter/fetch_jit.h      |    6 +-
 5 files changed, 169 insertions(+), 1133 deletions(-)
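
Side note, not part of this change: the manual save/restore of the
target width in FetchJit::Create could later be wrapped in a small RAII
guard so the base width is restored on every exit path. A sketch,
assuming only the SetTargetWidth()/mVWidth interface added below (the
helper name is hypothetical):

    // Hypothetical helper - not in this patch.  Overrides the builder's
    // target width and restores the previous width on scope exit.
    struct ScopedTargetWidth
    {
        Builder* mpBuilder;
        uint32_t mBaseWidth;

        ScopedTargetWidth(Builder* pBuilder, uint32_t width) :
            mpBuilder(pBuilder), mBaseWidth(pBuilder->mVWidth)
        {
            mpBuilder->SetTargetWidth(width);
        }

        ~ScopedTargetWidth() { mpBuilder->SetTargetWidth(mBaseWidth); }
    };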

diff --git a/src/gallium/drivers/swr/rasterizer/jitter/builder.cpp b/src/gallium/drivers/swr/rasterizer/jitter/builder.cpp
index 625f132..53947c3 100644
--- a/src/gallium/drivers/swr/rasterizer/jitter/builder.cpp
+++ b/src/gallium/drivers/swr/rasterizer/jitter/builder.cpp
@@ -66,16 +66,7 @@ namespace SwrJit
         mSimd4FP64Ty = VectorType::get(mDoubleTy, 4);
 
         // Built in types: target simd
-
-        mSimdInt1Ty     = VectorType::get(mInt1Ty,  mVWidth);
-        mSimdInt16Ty    = VectorType::get(mInt16Ty, mVWidth);
-        mSimdInt32Ty    = VectorType::get(mInt32Ty, mVWidth);
-        mSimdInt64Ty    = VectorType::get(mInt64Ty, mVWidth);
-        mSimdFP16Ty     = VectorType::get(mFP16Ty,  mVWidth);
-        mSimdFP32Ty     = VectorType::get(mFP32Ty,  mVWidth);
-        mSimdVectorTy   = ArrayType::get(mSimdFP32Ty, 4);
-        mSimdVectorIntTy= ArrayType::get(mSimdInt32Ty, 4);
-        mSimdVectorTRTy = ArrayType::get(mSimdFP32Ty, 5);
+        SetTargetWidth(pJitMgr->mVWidth);
 
         // Built in types: simd16
 
@@ -105,4 +96,19 @@ namespace SwrJit
             mSimd16IntPtrTy = mSimd16Int64Ty;
         }
     }
+
+    void Builder::SetTargetWidth(uint32_t width)
+    {
+        mVWidth = width;
+
+        mSimdInt1Ty = VectorType::get(mInt1Ty, mVWidth);
+        mSimdInt16Ty = VectorType::get(mInt16Ty, mVWidth);
+        mSimdInt32Ty = VectorType::get(mInt32Ty, mVWidth);
+        mSimdInt64Ty = VectorType::get(mInt64Ty, mVWidth);
+        mSimdFP16Ty = VectorType::get(mFP16Ty, mVWidth);
+        mSimdFP32Ty = VectorType::get(mFP32Ty, mVWidth);
+        mSimdVectorTy = ArrayType::get(mSimdFP32Ty, 4);
+        mSimdVectorIntTy = ArrayType::get(mSimdInt32Ty, 4);
+        mSimdVectorTRTy = ArrayType::get(mSimdFP32Ty, 5);
+    }
 }
diff --git a/src/gallium/drivers/swr/rasterizer/jitter/builder.h b/src/gallium/drivers/swr/rasterizer/jitter/builder.h
index 6b2c9f0..4c79bab 100644
--- a/src/gallium/drivers/swr/rasterizer/jitter/builder.h
+++ b/src/gallium/drivers/swr/rasterizer/jitter/builder.h
@@ -46,7 +46,7 @@ namespace SwrJit
         JitManager *mpJitMgr;
         IRBuilder<> *mpIRBuilder;
 
-        uint32_t             mVWidth;   // vector width simd8
+        uint32_t             mVWidth;   // vector width target simd
         uint32_t             mVWidth16; // vector width simd16
 
         // Built in types: scalar
@@ -95,6 +95,8 @@ namespace SwrJit
 
         Type*                mSimd32Int8Ty;
 
+        void SetTargetWidth(uint32_t width);
+
 #include "gen_builder.hpp"
 #include "gen_builder_meta.hpp"
 #include "gen_builder_intrin.hpp"
diff --git a/src/gallium/drivers/swr/rasterizer/jitter/builder_mem.cpp b/src/gallium/drivers/swr/rasterizer/jitter/builder_mem.cpp
index 0550493..4840fef 100644
--- a/src/gallium/drivers/swr/rasterizer/jitter/builder_mem.cpp
+++ b/src/gallium/drivers/swr/rasterizer/jitter/builder_mem.cpp
@@ -38,6 +38,7 @@ namespace SwrJit
 {
     void Builder::AssertRastyMemoryParams(Value* ptr, JIT_MEM_CLIENT usage)
     {
+        SWR_ASSERT(ptr->getType() != mInt64Ty, "Address appears to be GFX access.  Requires translation through BuilderGfxMem.");
     }
 
     Value *Builder::GEP(Value* ptr, const std::initializer_list<Value*> 
&indexList)
@@ -175,78 +176,14 @@ namespace SwrJit
     {
         AssertRastyMemoryParams(pBase, usage);
 
-        Value* vGather;
-
-        // use avx2 gather instruction if available
-        if (JM()->mArch.AVX2())
-        {
-            vGather = VGATHERDD(vSrc, pBase, vIndices, VMASK(vMask), C(scale));
-        }
-        else
-        {
-            Value* pStack = STACKSAVE();
-
-            // store vSrc on the stack.  this way we can select between a 
valid load address and the vSrc address
-            Value* vSrcPtr = ALLOCA(vSrc->getType());
-            STORE(vSrc, vSrcPtr);
-
-            vGather = VUNDEF_I();
-            Value *vScaleVec = VIMMED1((uint32_t)scale);
-            Value *vOffsets = MUL(vIndices, vScaleVec);
-            for (uint32_t i = 0; i < mVWidth; ++i)
-            {
-                // single component byte index
-                Value *offset = VEXTRACT(vOffsets, C(i));
-                // byte pointer to component
-                Value *loadAddress = GEP(pBase, offset);
-                loadAddress = BITCAST(loadAddress, PointerType::get(mInt32Ty, 
0));
-                // pointer to the value to load if we're masking off a 
component
-                Value *maskLoadAddress = GEP(vSrcPtr, { C(0), C(i) });
-                Value *selMask = VEXTRACT(vMask, C(i));
-                // switch in a safe address to load if we're trying to access 
a vertex 
-                Value *validAddress = SELECT(selMask, loadAddress, 
maskLoadAddress);
-                Value *val = LOAD(validAddress, C(0));
-                vGather = VINSERT(vGather, val, C(i));
-            }
-
-            STACKRESTORE(pStack);
-        }
-
-        return vGather;
+        return VGATHERDD(vSrc, pBase, vIndices, vMask, C(scale));
     }
 
     Value *Builder::GATHERDD_16(Value *vSrc, Value *pBase, Value *vIndices, 
Value *vMask, uint8_t scale, JIT_MEM_CLIENT usage)
     {
         AssertRastyMemoryParams(pBase, usage);
 
-        Value *vGather = VUNDEF_I_16();
-
-        // use AVX512F gather instruction if available
-        if (JM()->mArch.AVX512F())
-        {
-            // force mask to <N-bit Integer>, required by vgather2
-            Value *mask = BITCAST(vMask, mInt16Ty);
-
-            vGather = VGATHERDD_16(vSrc, pBase, vIndices, mask, 
C((uint32_t)scale));
-        }
-        else
-        {
-            Value *src0 = EXTRACT_16(vSrc, 0);
-            Value *src1 = EXTRACT_16(vSrc, 1);
-
-            Value *indices0 = EXTRACT_16(vIndices, 0);
-            Value *indices1 = EXTRACT_16(vIndices, 1);
-
-            Value *mask0 = EXTRACT_16(vMask, 0);
-            Value *mask1 = EXTRACT_16(vMask, 1);
-
-            Value *gather0 = GATHERDD(src0, pBase, indices0, mask0, scale);
-            Value *gather1 = GATHERDD(src1, pBase, indices1, mask1, scale);
-
-            vGather = JOIN_16(gather0, gather1);
-        }
-
-        return vGather;
+        return VGATHERDD_16(vSrc, pBase, vIndices, vMask, C(scale));
     }
 
     //////////////////////////////////////////////////////////////////////////
diff --git a/src/gallium/drivers/swr/rasterizer/jitter/fetch_jit.cpp b/src/gallium/drivers/swr/rasterizer/jitter/fetch_jit.cpp
index 92c308e..c6ff6a8 100644
--- a/src/gallium/drivers/swr/rasterizer/jitter/fetch_jit.cpp
+++ b/src/gallium/drivers/swr/rasterizer/jitter/fetch_jit.cpp
@@ -49,10 +49,6 @@ enum ConversionType
     CONVERT_SFIXED,
 };
 
-#if USE_SIMD16_SHADERS
-#define USE_SIMD16_GATHERS 0
-#endif
-
 //////////////////////////////////////////////////////////////////////////
 /// Interface to Jitting a fetch shader
 //////////////////////////////////////////////////////////////////////////
@@ -73,56 +69,20 @@ struct FetchJit : public BuilderGfxMem
         uint32_t&, uint32_t&, const ComponentEnable, const 
ComponentControl(&)[4], Value*(&)[4],
         const uint32_t(&)[4]> Shuffle8bpcArgs;
 
-#if USE_SIMD16_SHADERS
-#if USE_SIMD16_GATHERS
     void Shuffle8bpcGatherd16(Shuffle8bpcArgs &args);
-#else
-    void Shuffle8bpcGatherd(Shuffle8bpcArgs &args, bool useVertexID2);
-#endif
-#else
     void Shuffle8bpcGatherd(Shuffle8bpcArgs &args);
-#endif
 
     typedef std::tuple<Value*(&)[2], Value*, const Instruction::CastOps, const 
ConversionType,
         uint32_t&, uint32_t&, const ComponentEnable, const 
ComponentControl(&)[4], Value*(&)[4]> Shuffle16bpcArgs;
 
-#if USE_SIMD16_SHADERS
-#if USE_SIMD16_GATHERS
     void Shuffle16bpcGather16(Shuffle16bpcArgs &args);
-#else
-    void Shuffle16bpcGather(Shuffle16bpcArgs &args, bool useVertexID2);
-#endif
-#else
     void Shuffle16bpcGather(Shuffle16bpcArgs &args);
-#endif
 
-#if USE_SIMD16_GATHERS
-    void StoreVertexElements16(Value* pVtxOut, const uint32_t outputElt, const 
uint32_t numEltsToStore, Value* (&vVertexElements)[4]);
-#else
     void StoreVertexElements(Value* pVtxOut, const uint32_t outputElt, const 
uint32_t numEltsToStore, Value* (&vVertexElements)[4]);
-#endif
 
-#if USE_SIMD16_SHADERS
-#if USE_SIMD16_GATHERS
-    Value *GenerateCompCtrlVector16(const ComponentControl ctrl);
-#else
-    Value *GenerateCompCtrlVector(const ComponentControl ctrl, bool 
useVertexID2);
-#endif
-#else
     Value *GenerateCompCtrlVector(const ComponentControl ctrl);
-#endif
-
-    void JitLoadVertices(const FETCH_COMPILE_STATE &fetchState, Value* 
streams, Value* vIndices, Value* pVtxOut);
 
-#if USE_SIMD16_SHADERS
-#if USE_SIMD16_GATHERS
-    void JitGatherVertices(const FETCH_COMPILE_STATE &fetchState, Value 
*streams, Value *vIndices, Value *vIndices2, Value *pVtxOut, bool useVertexID2);
-#else
-    void JitGatherVertices(const FETCH_COMPILE_STATE &fetchState, Value* 
streams, Value* vIndices, Value* pVtxOut, bool useVertexID2);
-#endif
-#else
     void JitGatherVertices(const FETCH_COMPILE_STATE &fetchState, Value* 
streams, Value* vIndices, Value* pVtxOut);
-#endif
 
     bool IsOddFormat(SWR_FORMAT format);
     bool IsUniformFormat(SWR_FORMAT format);
@@ -157,16 +117,17 @@ Function* FetchJit::Create(const FETCH_COMPILE_STATE& fetchState)
     Value*    pVtxOut = &*argitr;
     pVtxOut->setName("vtxOutput");
 
+    uint32_t baseWidth = mVWidth;
+
+    SWR_ASSERT(mVWidth == 8 || mVWidth == 16, "Unsupported vector width %d", mVWidth);
+
+    // Override builder target width to force 16-wide SIMD
 #if USE_SIMD16_SHADERS
-#if 0// USE_SIMD16_BUILDER
-    pVtxOut = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, 
mVWidth16), 0));
-#else
-    pVtxOut = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, 
mVWidth), 0));
-#endif
-#else
-    pVtxOut = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, 
mVWidth), 0));
+    SetTargetWidth(16);
 #endif
 
+    pVtxOut = BITCAST(pVtxOut, PointerType::get(mSimdFP32Ty, 0));
+
     // SWR_FETCH_CONTEXT::pStreams
     Value*    streams = LOAD(mpFetchInfo,{0, SWR_FETCH_CONTEXT_pStreams});
     streams->setName("pStreams");
@@ -178,99 +139,57 @@ Function* FetchJit::Create(const FETCH_COMPILE_STATE& fetchState)
     // SWR_FETCH_CONTEXT::pLastIndex
     Value*    pLastIndex = LOAD(mpFetchInfo,{0, SWR_FETCH_CONTEXT_pLastIndex});
     pLastIndex->setName("pLastIndex");
-    
 
     Value* vIndices;
-#if USE_SIMD16_SHADERS
-    Value* indices2;
-    Value* vIndices2;
-#endif
     switch(fetchState.indexType)
     {
         case R8_UINT:
             indices = BITCAST(indices, Type::getInt8PtrTy(JM()->mContext, 0));
-#if USE_SIMD16_SHADERS
-            indices2 = GEP(indices, C(8));
-#endif
             if(fetchState.bDisableIndexOOBCheck)
             {
                 vIndices = LOAD(BITCAST(indices, 
PointerType::get(VectorType::get(mInt8Ty, mpJitMgr->mVWidth), 0)), 
{(uint32_t)0});
                 vIndices = Z_EXT(vIndices, mSimdInt32Ty);
-#if USE_SIMD16_SHADERS
-                vIndices2 = LOAD(BITCAST(indices2, 
PointerType::get(VectorType::get(mInt8Ty, mpJitMgr->mVWidth), 0)), { 
(uint32_t)0 });
-                vIndices2 = Z_EXT(vIndices2, mSimdInt32Ty);
-#endif
             }
             else
             {
                 pLastIndex = BITCAST(pLastIndex, 
Type::getInt8PtrTy(JM()->mContext, 0));
                 vIndices = GetSimdValid8bitIndices(indices, pLastIndex);
-#if USE_SIMD16_SHADERS
-                pLastIndex = BITCAST(pLastIndex, 
Type::getInt8PtrTy(JM()->mContext, 0));
-                vIndices2 = GetSimdValid8bitIndices(indices2, pLastIndex);
-#endif
             }
             break;
         case R16_UINT: 
             indices = BITCAST(indices, Type::getInt16PtrTy(JM()->mContext, 
0)); 
-#if USE_SIMD16_SHADERS
-            indices2 = GEP(indices, C(8));
-#endif
             if(fetchState.bDisableIndexOOBCheck)
             {
                 vIndices = LOAD(BITCAST(indices, 
PointerType::get(VectorType::get(mInt16Ty, mpJitMgr->mVWidth), 0)), 
{(uint32_t)0});
                 vIndices = Z_EXT(vIndices, mSimdInt32Ty);
-#if USE_SIMD16_SHADERS
-                vIndices2 = LOAD(BITCAST(indices2, 
PointerType::get(VectorType::get(mInt16Ty, mpJitMgr->mVWidth), 0)), { 
(uint32_t)0 });
-                vIndices2 = Z_EXT(vIndices2, mSimdInt32Ty);
-#endif
             }
             else
             {
                 pLastIndex = BITCAST(pLastIndex, 
Type::getInt16PtrTy(JM()->mContext, 0));
                 vIndices = GetSimdValid16bitIndices(indices, pLastIndex);
-#if USE_SIMD16_SHADERS
-                pLastIndex = BITCAST(pLastIndex, 
Type::getInt16PtrTy(JM()->mContext, 0));
-                vIndices2 = GetSimdValid16bitIndices(indices2, pLastIndex);
-#endif
             }
             break;
         case R32_UINT:
-#if USE_SIMD16_SHADERS
-            indices2 = GEP(indices, C(8));
-#endif
             (fetchState.bDisableIndexOOBCheck) ? vIndices = 
LOAD(BITCAST(indices, PointerType::get(mSimdInt32Ty,0)),{(uint32_t)0})
                                                : vIndices = 
GetSimdValid32bitIndices(indices, pLastIndex);
-#if USE_SIMD16_SHADERS
-            (fetchState.bDisableIndexOOBCheck) ? vIndices2 = 
LOAD(BITCAST(indices2, PointerType::get(mSimdInt32Ty, 0)), { (uint32_t)0 })
-                                               : vIndices2 = 
GetSimdValid32bitIndices(indices2, pLastIndex);
-#endif
             break; // incoming type is already 32bit int
         default:
             SWR_INVALID("Unsupported index type");
             vIndices = nullptr;
-#if USE_SIMD16_SHADERS
-            vIndices2 = nullptr;
-#endif
             break;
     }
 
     if(fetchState.bForceSequentialAccessEnable)
     {
-        Value* pOffsets = C({ 0, 1, 2, 3, 4, 5, 6, 7 });
+        Value* pOffsets = mVWidth == 8 ? C({ 0, 1, 2, 3, 4, 5, 6, 7 }) :
+            C({ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 });
 
         // VertexData buffers are accessed sequentially, the index is equal to 
the vertex number
         vIndices = VBROADCAST(LOAD(mpFetchInfo, { 0, 
SWR_FETCH_CONTEXT_StartVertex }));
         vIndices = ADD(vIndices, pOffsets);
-#if USE_SIMD16_SHADERS
-        vIndices2 = ADD(vIndices, VIMMED1(8));
-#endif
     }
 
     Value* vVertexId = vIndices;
-#if USE_SIMD16_SHADERS
-    Value* vVertexId2 = vIndices2;
-#endif
     if (fetchState.bVertexIDOffsetEnable)
     {
         // Assuming one of baseVertex or startVertex is 0, so adding both 
should be functionally correct
@@ -278,51 +197,44 @@ Function* FetchJit::Create(const FETCH_COMPILE_STATE& fetchState)
         Value* vStartVertex = VBROADCAST(LOAD(mpFetchInfo, { 0, 
SWR_FETCH_CONTEXT_StartVertex }));
         vVertexId = ADD(vIndices, vBaseVertex);
         vVertexId = ADD(vVertexId, vStartVertex);
-#if USE_SIMD16_SHADERS
-        vVertexId2 = ADD(vIndices2, vBaseVertex);
-        vVertexId2 = ADD(vVertexId2, vStartVertex);
-#endif
     }
 
     // store out vertex IDs
-    STORE(vVertexId, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID }));
-#if USE_SIMD16_SHADERS
-    STORE(vVertexId2, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID2 }));
-#endif
+    if (mVWidth == 16)
+    {
+        // store out in simd8 halves until the core supports 16-wide natively
+        auto vVertexIdLo = EXTRACT_16(vVertexId, 0);
+        auto vVertexIdHi = EXTRACT_16(vVertexId, 1);
+        STORE(vVertexIdLo, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID }));
+        STORE(vVertexIdHi, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID2 }));
+    }
+    else if (mVWidth == 8)
+    {
+        STORE(vVertexId, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID }));
+    }
 
     // store out cut mask if enabled
     if (fetchState.bEnableCutIndex)
     {
         Value* vCutIndex = VIMMED1(fetchState.cutIndex);
         Value* cutMask = VMASK(ICMP_EQ(vIndices, vCutIndex));
-        STORE(cutMask, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_CutMask }));
-#if USE_SIMD16_SHADERS
-        Value* cutMask2 = VMASK(ICMP_EQ(vIndices2, vCutIndex));
-        STORE(cutMask2, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_CutMask2 }));
-#endif
+
+        if (mVWidth == 16)
+        {
+            auto cutMaskLo = EXTRACT_16(cutMask, 0);
+            auto cutMaskHi = EXTRACT_16(cutMask, 1);
+            STORE(cutMaskLo, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_CutMask }));
+            STORE(cutMaskHi, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_CutMask2 }));
+        }
+        else if (mVWidth == 8)
+        {
+            STORE(cutMask, GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_CutMask }));
+        }
     }
 
     // Fetch attributes from memory and output to a simdvertex struct
     // since VGATHER has a perf penalty on HSW vs BDW, allow client to choose 
which fetch method to use
-#if USE_SIMD16_SHADERS
-    if (fetchState.bDisableVGATHER)
-    {
-        JitLoadVertices(fetchState, streams, vIndices, pVtxOut);
-        JitLoadVertices(fetchState, streams, vIndices2, GEP(pVtxOut, C(1)));
-    }
-    else
-    {
-#if USE_SIMD16_GATHERS
-        JitGatherVertices(fetchState, streams, vIndices, vIndices2, pVtxOut, 
false);
-#else
-        JitGatherVertices(fetchState, streams, vIndices, pVtxOut, false);
-        JitGatherVertices(fetchState, streams, vIndices2, GEP(pVtxOut, C(1)), 
true);
-#endif
-    }
-#else
-    (fetchState.bDisableVGATHER) ? JitLoadVertices(fetchState, streams, 
vIndices, pVtxOut)
-                                 : JitGatherVertices(fetchState, streams, 
vIndices, pVtxOut);
-#endif
+    JitGatherVertices(fetchState, streams, vIndices, pVtxOut);
 
     RET_VOID();
 
@@ -363,335 +275,12 @@ Function* FetchJit::Create(const FETCH_COMPILE_STATE& fetchState)
     JitManager::DumpToFile(fetch, "opt");
 
 
-    return fetch;
-}
-
-//////////////////////////////////////////////////////////////////////////
-/// @brief Loads attributes from memory using LOADs, shuffling the 
-/// components into SOA form. 
-/// *Note* currently does not support component control,
-/// component packing, instancing
-/// @param fetchState - info about attributes to be fetched from memory
-/// @param streams - value pointer to the current vertex stream
-/// @param vIndices - vector value of indices to load
-/// @param pVtxOut - value pointer to output simdvertex struct
-void FetchJit::JitLoadVertices(const FETCH_COMPILE_STATE &fetchState, Value* 
streams, Value* vIndices, Value* pVtxOut)
-{
-    // Zack shuffles; a variant of the Charleston.
-
-    std::vector<Value*> vectors(16);
-    std::vector<Constant*>    pMask(mVWidth);
-    for(uint32_t i = 0; i < mVWidth; ++i)
-    {
-        pMask[i] = (C(i < 4 ? i : 4));
-    }
-    Constant* promoteMask = ConstantVector::get(pMask);
-    Constant* uwvec = UndefValue::get(VectorType::get(mFP32Ty, 4));
-
-    Value* startVertex = LOAD(mpFetchInfo, {0, SWR_FETCH_CONTEXT_StartVertex});
-    Value* startInstance = LOAD(mpFetchInfo, {0, 
SWR_FETCH_CONTEXT_StartInstance});
-    Value* curInstance = LOAD(mpFetchInfo, {0, SWR_FETCH_CONTEXT_CurInstance});
-    Value* vBaseVertex = VBROADCAST(LOAD(mpFetchInfo, {0, 
SWR_FETCH_CONTEXT_BaseVertex}));
-    curInstance->setName("curInstance");
-
-    for(uint32_t nelt = 0; nelt < fetchState.numAttribs; ++nelt)
-    {
-        Value*    elements[4] = {0};
-        const INPUT_ELEMENT_DESC& ied = fetchState.layout[nelt];
-        const SWR_FORMAT_INFO &info = GetFormatInfo((SWR_FORMAT)ied.Format);
-        SWR_ASSERT((info.bpp != 0), "Unsupported format in JitLoadVertices.");
-        uint32_t    numComponents = info.numComps;
-        uint32_t bpc = info.bpp / info.numComps;  ///@todo Code below assumes 
all components are same size. Need to fix.
-
-        // load path doesn't support component packing
-        SWR_ASSERT(ied.ComponentPacking == ComponentEnable::XYZW, "Fetch load 
path doesn't support component packing.");
-
-        vectors.clear();
-
-        if (fetchState.bInstanceIDOffsetEnable)
-        {
-            SWR_ASSERT((0), "TODO: Fill out more once driver sends this down");
-        }
-
-        Value *vCurIndices;
-        Value *startOffset;
-        if(ied.InstanceEnable)
-        {
-            Value* stepRate = C(ied.InstanceAdvancementState);
-
-            // prevent a div by 0 for 0 step rate
-            Value* isNonZeroStep = ICMP_UGT(stepRate, C(0));
-            stepRate = SELECT(isNonZeroStep, stepRate, C(1));
-
-            // calc the current offset into instanced data buffer
-            Value* calcInstance = UDIV(curInstance, stepRate);
-
-            // if step rate is 0, every instance gets instance 0
-            calcInstance = SELECT(isNonZeroStep, calcInstance, C(0));
-
-            vCurIndices = VBROADCAST(calcInstance);
-
-            startOffset = startInstance;
-        }
-        else if (ied.InstanceStrideEnable)
-        {
-            // silence unused variable warnings
-            startOffset = C(0);
-            vCurIndices = vIndices;
-
-            SWR_ASSERT((0), "TODO: Fill out more once driver sends this 
down.");
-        }
-        else
-        {
-            // offset indices by baseVertex
-            vCurIndices = ADD(vIndices, vBaseVertex);
-
-            startOffset = startVertex;
-        }
-
-        // load SWR_VERTEX_BUFFER_STATE::pData
-        Value *stream = LOAD(streams, {ied.StreamIndex, 
SWR_VERTEX_BUFFER_STATE_xpData});
-
-        // load SWR_VERTEX_BUFFER_STATE::pitch
-        Value *stride = LOAD(streams, {ied.StreamIndex, 
SWR_VERTEX_BUFFER_STATE_pitch});
-        stride = Z_EXT(stride, mInt64Ty);
-
-        // load SWR_VERTEX_BUFFER_STATE::size
-        Value *size = LOAD(streams, {ied.StreamIndex, 
SWR_VERTEX_BUFFER_STATE_size});
-        size = Z_EXT(size, mInt64Ty);
-
-        Value* startVertexOffset = MUL(Z_EXT(startOffset, mInt64Ty), stride);
-
-        Value *minVertex = NULL;
-        Value *minVertexOffset = NULL;
-        if (fetchState.bPartialVertexBuffer) {
-            // fetch min index for low bounds checking
-            minVertex = GEP(streams, {C(ied.StreamIndex), 
C(SWR_VERTEX_BUFFER_STATE_minVertex)});
-            minVertex = LOAD(minVertex);
-            if (!fetchState.bDisableIndexOOBCheck) {
-                minVertexOffset = MUL(Z_EXT(minVertex, mInt64Ty), stride);
-            }
-        }
-
-        // Load from the stream.
-        for(uint32_t lane = 0; lane < mVWidth; ++lane)
-        {
-            // Get index
-            Value* index = VEXTRACT(vCurIndices, C(lane));
-
-            if (fetchState.bPartialVertexBuffer) {
-                // clamp below minvertex
-                Value *isBelowMin = ICMP_SLT(index, minVertex);
-                index = SELECT(isBelowMin, minVertex, index);
-            }
-
-            index = Z_EXT(index, mInt64Ty);
-
-            Value*    offset = MUL(index, stride);
-            offset = ADD(offset, C((int64_t)ied.AlignedByteOffset));
-            offset = ADD(offset, startVertexOffset);
-
-            if (!fetchState.bDisableIndexOOBCheck) {
-                // check for out of bound access, including partial OOB, and 
replace them with minVertex
-                Value *endOffset = ADD(offset, C((int64_t)info.Bpp));
-                Value *oob = ICMP_ULE(endOffset, size);
-                if (fetchState.bPartialVertexBuffer) {
-                    offset = SELECT(oob, offset, minVertexOffset);
-                } else {
-                    offset = SELECT(oob, offset, ConstantInt::get(mInt64Ty, 
0));
-                }
-            }
-
-            Value*    pointer = GEP(stream, offset);
-            // We use a full-lane, but don't actually care.
-            Value*    vptr = 0;
-
-            // get a pointer to a 4 component attrib in default address space
-            switch(bpc)
-            {
-                case 8: vptr = BITCAST(pointer, 
PointerType::get(VectorType::get(mInt8Ty, 4), 0)); break;
-                case 16: vptr = BITCAST(pointer, 
PointerType::get(VectorType::get(mInt16Ty, 4), 0)); break;
-                case 32: vptr = BITCAST(pointer, 
PointerType::get(VectorType::get(mFP32Ty, 4), 0)); break;
-                default: SWR_INVALID("Unsupported underlying bpp!");
-            }
-
-            // load 4 components of attribute
-            Value*    vec = ALIGNED_LOAD(vptr, 1, false);
-
-            // Convert To FP32 internally
-            switch(info.type[0])
-            {
-                case SWR_TYPE_UNORM:
-                    switch(bpc)
-                    {
-                        case 8:
-                            vec = UI_TO_FP(vec, VectorType::get(mFP32Ty, 4));
-                            vec = FMUL(vec, 
ConstantVector::get(std::vector<Constant*>(4, ConstantFP::get(mFP32Ty, 1.0 / 
255.0))));
-                            break;
-                        case 16:
-                            vec = UI_TO_FP(vec, VectorType::get(mFP32Ty, 4));
-                            vec = FMUL(vec, 
ConstantVector::get(std::vector<Constant*>(4, ConstantFP::get(mFP32Ty, 1.0 / 
65535.0))));
-                            break;
-                        default:
-                            SWR_INVALID("Unsupported underlying type!");
-                            break;
-                    }
-                    break;
-                case SWR_TYPE_SNORM:
-                    switch(bpc)
-                    {
-                        case 8:
-                            vec = SI_TO_FP(vec, VectorType::get(mFP32Ty, 4));
-                            vec = FMUL(vec, 
ConstantVector::get(std::vector<Constant*>(4, ConstantFP::get(mFP32Ty, 1.0 / 
128.0))));
-                            break;
-                        case 16:
-                            vec = SI_TO_FP(vec, VectorType::get(mFP32Ty, 4));
-                            vec = FMUL(vec, 
ConstantVector::get(std::vector<Constant*>(4, ConstantFP::get(mFP32Ty, 1.0 / 
32768.0))));
-                            break;
-                        default:
-                            SWR_INVALID("Unsupported underlying type!");
-                            break;
-                    }
-                    break;
-                case SWR_TYPE_UINT:
-                    // Zero extend uint32_t types.
-                    switch(bpc)
-                    {
-                        case 8:
-                        case 16:
-                            vec = Z_EXT(vec, VectorType::get(mInt32Ty, 4));
-                            vec = BITCAST(vec, VectorType::get(mFP32Ty, 4));
-                            break;
-                        case 32:
-                            break; // Pass through unchanged.
-                        default:
-                            SWR_INVALID("Unsupported underlying type!");
-                            break;
-                    }
-                    break;
-                case SWR_TYPE_SINT:
-                    // Sign extend SINT types.
-                    switch(bpc)
-                    {
-                        case 8:
-                        case 16:
-                            vec = S_EXT(vec, VectorType::get(mInt32Ty, 4));
-                            vec = BITCAST(vec, VectorType::get(mFP32Ty, 4));
-                            break;
-                        case 32:
-                            break; // Pass through unchanged.
-                        default:
-                            SWR_INVALID("Unsupported underlying type!");
-                            break;
-                    }
-                    break;
-                case SWR_TYPE_FLOAT:
-                    switch(bpc)
-                    {
-                        case 32:
-                            break; // Pass through unchanged.
-                        default:
-                            SWR_INVALID("Unsupported underlying type!");
-                    }
-                    break;
-                case SWR_TYPE_USCALED:
-                    vec = UI_TO_FP(vec, VectorType::get(mFP32Ty, 4));
-                    break;
-                case SWR_TYPE_SSCALED:
-                    vec = SI_TO_FP(vec, VectorType::get(mFP32Ty, 4));
-                    break;
-                case SWR_TYPE_SFIXED:
-                    vec = FMUL(SI_TO_FP(vec, VectorType::get(mFP32Ty, 4)), 
VBROADCAST(C(1/65536.0f)));
-                    break;
-                case SWR_TYPE_UNKNOWN:
-                case SWR_TYPE_UNUSED:
-                    SWR_INVALID("Unsupported type %d!", info.type[0]);
-            }
-
-            // promote mask: sse(0,1,2,3) | avx(0,1,2,3,4,4,4,4)
-            // uwvec: 4 x F32, undef value
-            Value*    wvec = VSHUFFLE(vec, uwvec, promoteMask);
-            vectors.push_back(wvec);
-        }
-
-        std::vector<Constant*>        v01Mask(mVWidth);
-        std::vector<Constant*>        v23Mask(mVWidth);
-        std::vector<Constant*>        v02Mask(mVWidth);
-        std::vector<Constant*>        v13Mask(mVWidth);
-
-        // Concatenate the vectors together.
-        elements[0] = VUNDEF_F(); 
-        elements[1] = VUNDEF_F(); 
-        elements[2] = VUNDEF_F(); 
-        elements[3] = VUNDEF_F(); 
-        for(uint32_t b = 0, num4Wide = mVWidth / 4; b < num4Wide; ++b)
-        {
-            v01Mask[4 * b + 0] = C(0 + 4 * b);
-            v01Mask[4 * b + 1] = C(1 + 4 * b);
-            v01Mask[4 * b + 2] = C(0 + 4 * b + mVWidth);
-            v01Mask[4 * b + 3] = C(1 + 4 * b + mVWidth);
-
-            v23Mask[4 * b + 0] = C(2 + 4 * b);
-            v23Mask[4 * b + 1] = C(3 + 4 * b);
-            v23Mask[4 * b + 2] = C(2 + 4 * b + mVWidth);
-            v23Mask[4 * b + 3] = C(3 + 4 * b + mVWidth);
-
-            v02Mask[4 * b + 0] = C(0 + 4 * b);
-            v02Mask[4 * b + 1] = C(2 + 4 * b);
-            v02Mask[4 * b + 2] = C(0 + 4 * b + mVWidth);
-            v02Mask[4 * b + 3] = C(2 + 4 * b + mVWidth);
-
-            v13Mask[4 * b + 0] = C(1 + 4 * b);
-            v13Mask[4 * b + 1] = C(3 + 4 * b);
-            v13Mask[4 * b + 2] = C(1 + 4 * b + mVWidth);
-            v13Mask[4 * b + 3] = C(3 + 4 * b + mVWidth);
-
-            std::vector<Constant*>    iMask(mVWidth);
-            for(uint32_t i = 0; i < mVWidth; ++i)
-            {
-                if(((4 * b) <= i) && (i < (4 * (b + 1))))
-                {
-                    iMask[i] = C(i % 4 + mVWidth);
-                }
-                else
-                {
-                    iMask[i] = C(i);
-                }
-            }
-            Constant* insertMask = ConstantVector::get(iMask);
-            elements[0] = VSHUFFLE(elements[0], vectors[4 * b + 0], 
insertMask);
-            elements[1] = VSHUFFLE(elements[1], vectors[4 * b + 1], 
insertMask);
-            elements[2] = VSHUFFLE(elements[2], vectors[4 * b + 2], 
insertMask);
-            elements[3] = VSHUFFLE(elements[3], vectors[4 * b + 3], 
insertMask);
-        }
-
-        Value* x0y0x1y1 = VSHUFFLE(elements[0], elements[1], 
ConstantVector::get(v01Mask));
-        Value* x2y2x3y3 = VSHUFFLE(elements[2], elements[3], 
ConstantVector::get(v01Mask));
-        Value* z0w0z1w1 = VSHUFFLE(elements[0], elements[1], 
ConstantVector::get(v23Mask));
-        Value* z2w3z2w3 = VSHUFFLE(elements[2], elements[3], 
ConstantVector::get(v23Mask));
-        elements[0] = VSHUFFLE(x0y0x1y1, x2y2x3y3, 
ConstantVector::get(v02Mask));
-        elements[1] = VSHUFFLE(x0y0x1y1, x2y2x3y3, 
ConstantVector::get(v13Mask));
-        elements[2] = VSHUFFLE(z0w0z1w1, z2w3z2w3, 
ConstantVector::get(v02Mask));
-        elements[3] = VSHUFFLE(z0w0z1w1, z2w3z2w3, 
ConstantVector::get(v13Mask));
-
-        switch(numComponents + 1)
-        {
-            case    1: elements[0] = VIMMED1(0.0f);
-            case    2: elements[1] = VIMMED1(0.0f);
-            case    3: elements[2] = VIMMED1(0.0f);
-            case    4: elements[3] = VIMMED1(1.0f);
-        }
-
-        for(uint32_t c = 0; c < 4; ++c)
-        {
+    // Revert 16-wide override
 #if USE_SIMD16_SHADERS
-            Value* dest = GEP(pVtxOut, C(nelt * 8 + c * 2), "destGEP");
-#else
-            Value* dest = GEP(pVtxOut, C(nelt * 4 + c), "destGEP");
+    SetTargetWidth(baseWidth);
 #endif
-            STORE(elements[c], dest);
-        }
-    }
+
+    return fetch;
 }
 
 // returns true for odd formats that require special state.gather handling
@@ -894,34 +483,17 @@ void FetchJit::ConvertFormat(SWR_FORMAT format, Value *texels[4])
 /// @param streams - value pointer to the current vertex stream
 /// @param vIndices - vector value of indices to gather
 /// @param pVtxOut - value pointer to output simdvertex struct
-#if USE_SIMD16_SHADERS
-#if USE_SIMD16_GATHERS
-void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
-    Value *streams, Value *vIndices, Value *vIndices2, Value *pVtxOut, bool 
useVertexID2)
-#else
-void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
-    Value* streams, Value* vIndices, Value* pVtxOut, bool useVertexID2)
-#endif
-#else
 void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
     Value* streams, Value* vIndices, Value* pVtxOut)
-#endif
 {
     uint32_t currentVertexElement = 0;
     uint32_t outputElt = 0;
     Value* vVertexElements[4];
-#if USE_SIMD16_GATHERS
-    Value *pVtxSrc2[4];
-#endif
 
     Value* startVertex = LOAD(mpFetchInfo, {0, SWR_FETCH_CONTEXT_StartVertex});
     Value* startInstance = LOAD(mpFetchInfo, {0, 
SWR_FETCH_CONTEXT_StartInstance});
     Value* curInstance = LOAD(mpFetchInfo, {0, SWR_FETCH_CONTEXT_CurInstance});
-#if USE_SIMD16_GATHERS
-    Value* vBaseVertex16 = VBROADCAST_16(LOAD(mpFetchInfo, { 0, 
SWR_FETCH_CONTEXT_BaseVertex }));
-#else
     Value* vBaseVertex = VBROADCAST(LOAD(mpFetchInfo, { 0, 
SWR_FETCH_CONTEXT_BaseVertex }));
-#endif
     curInstance->setName("curInstance");
 
     for (uint32_t nInputElt = 0; nInputElt < fetchState.numAttribs; nInputElt 
+= 1)
@@ -944,11 +516,7 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
         Value *pStreamBase = INT_TO_PTR(stream, PointerType::get(mInt8Ty, 0));
 
         Value *stride = LOAD(streams, {ied.StreamIndex, 
SWR_VERTEX_BUFFER_STATE_pitch});
-#if USE_SIMD16_GATHERS
-        Value *vStride16 = VBROADCAST_16(stride);
-#else
         Value *vStride = VBROADCAST(stride);
-#endif
 
         // max vertex index that is fully in bounds
         Value *maxVertex = GEP(streams, {C(ied.StreamIndex), 
C(SWR_VERTEX_BUFFER_STATE_maxVertex)});
@@ -968,17 +536,9 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
             curInstance = ADD(curInstance, startInstance);
         }
 
-#if USE_SIMD16_GATHERS
-        Value *vCurIndices16;
-#else
         Value *vCurIndices;
-#endif
         Value *startOffset;
-#if USE_SIMD16_GATHERS
-        Value *vInstanceStride16 = VIMMED1_16(0);
-#else
         Value *vInstanceStride = VIMMED1(0);
-#endif
 
         if (ied.InstanceEnable)
         {
@@ -994,32 +554,17 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
             // if step rate is 0, every instance gets instance 0
             calcInstance = SELECT(isNonZeroStep, calcInstance, C(0));
 
-#if USE_SIMD16_GATHERS
-            vCurIndices16 = VBROADCAST_16(calcInstance);
-#else
             vCurIndices = VBROADCAST(calcInstance);
-#endif
-
             startOffset = startInstance;
         }
         else if (ied.InstanceStrideEnable)
         {
             // grab the instance advancement state, determines stride in bytes 
from one instance to the next
             Value* stepRate = C(ied.InstanceAdvancementState);
-#if USE_SIMD16_GATHERS
-            vInstanceStride16 = VBROADCAST_16(MUL(curInstance, stepRate));
-#else
             vInstanceStride = VBROADCAST(MUL(curInstance, stepRate));
-#endif
 
             // offset indices by baseVertex
-#if USE_SIMD16_GATHERS
-            Value *vIndices16 = JOIN_16(vIndices, vIndices2);
-
-            vCurIndices16 = ADD(vIndices16, vBaseVertex16);
-#else
             vCurIndices = ADD(vIndices, vBaseVertex);
-#endif
 
             startOffset = startVertex;
             SWR_ASSERT((0), "TODO: Fill out more once driver sends this 
down.");
@@ -1027,14 +572,7 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
         else
         {
             // offset indices by baseVertex
-#if USE_SIMD16_GATHERS
-            Value *vIndices16 = JOIN_16(vIndices, vIndices2);
-
-            vCurIndices16 = ADD(vIndices16, vBaseVertex16);
-#else
             vCurIndices = ADD(vIndices, vBaseVertex);
-#endif
-
             startOffset = startVertex;
         }
 
@@ -1063,67 +601,13 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
         // Load the in bounds size of a partially valid vertex
         Value *partialInboundsSize = GEP(streams, {C(ied.StreamIndex), 
C(SWR_VERTEX_BUFFER_STATE_partialInboundsSize)});
         partialInboundsSize = LOAD(partialInboundsSize);
-#if USE_SIMD16_GATHERS
-        Value *vPartialVertexSize = VBROADCAST_16(partialInboundsSize);
-        Value *vBpp = VBROADCAST_16(C(info.Bpp));
-        Value *vAlignmentOffsets = VBROADCAST_16(C(ied.AlignedByteOffset));
-#else
         Value *vPartialVertexSize = VBROADCAST(partialInboundsSize);
         Value *vBpp = VBROADCAST(C(info.Bpp));
         Value *vAlignmentOffsets = VBROADCAST(C(ied.AlignedByteOffset));
-#endif
 
         // is the element <= the partially valid size
         Value *vElementInBoundsMask = ICMP_SLE(vBpp, SUB(vPartialVertexSize, 
vAlignmentOffsets));
 
-#if USE_SIMD16_GATHERS
-        // override cur indices with 0 if pitch is 0
-        Value *pZeroPitchMask16 = ICMP_EQ(vStride16, VIMMED1_16(0));
-        vCurIndices16 = SELECT(pZeroPitchMask16, VIMMED1_16(0), vCurIndices16);
-
-        // are vertices partially OOB?
-        Value *vMaxVertex16 = VBROADCAST_16(maxVertex);
-        Value *vPartialOOBMask = ICMP_EQ(vCurIndices16, vMaxVertex16);
-
-        // are vertices fully in bounds?
-        Value *vMaxGatherMask16 = ICMP_ULT(vCurIndices16, vMaxVertex16);
-
-        Value *vGatherMask16;
-
-        if (fetchState.bPartialVertexBuffer)
-        {
-            // are vertices below minVertex limit?
-            Value *vMinVertex16 = VBROADCAST_16(minVertex);
-            Value *vMinGatherMask16 = ICMP_UGE(vCurIndices16, vMinVertex16);
-
-            // only fetch lanes that pass both tests
-            vGatherMask16 = AND(vMaxGatherMask16, vMinGatherMask16);
-        }
-        else
-        {
-            vGatherMask16 = vMaxGatherMask16;
-        }
-
-        // blend in any partially OOB indices that have valid elements
-        vGatherMask16 = SELECT(vPartialOOBMask, vElementInBoundsMask, 
vGatherMask16);
-
-        // calculate the actual offsets into the VB
-        Value *vOffsets16 = MUL(vCurIndices16, vStride16);
-        vOffsets16 = ADD(vOffsets16, vAlignmentOffsets);
-
-        // if instance stride enable is:
-        //  true  - add product of the instanceID and advancement state to the 
offst into the VB
-        //  false - value of vInstanceStride has been initialialized to zero
-        vOffsets16 = ADD(vOffsets16, vInstanceStride16);
-
-        // TODO: remove the following simd8 interop stuff once all code paths 
are fully widened to SIMD16..
-
-        Value *vGatherMask  = EXTRACT_16(vGatherMask16, 0);
-        Value *vGatherMask2 = EXTRACT_16(vGatherMask16, 1);
-
-        Value *vOffsets  = EXTRACT_16(vOffsets16, 0);
-        Value *vOffsets2 = EXTRACT_16(vOffsets16, 1);
-#else
         // override cur indices with 0 if pitch is 0
         Value* pZeroPitchMask = ICMP_EQ(vStride, VIMMED1(0));
         vCurIndices = SELECT(pZeroPitchMask, VIMMED1(0), vCurIndices);
@@ -1162,125 +646,45 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
         //  false - value of vInstanceStride has been initialized to zero
         vOffsets = ADD(vOffsets, vInstanceStride);
 
-#endif
         // Packing and component control 
         ComponentEnable compMask = (ComponentEnable)ied.ComponentPacking;
         const ComponentControl compCtrl[4] { 
(ComponentControl)ied.ComponentControl0, 
(ComponentControl)ied.ComponentControl1, 
                                              
(ComponentControl)ied.ComponentControl2, 
(ComponentControl)ied.ComponentControl3}; 
 
-        // Special gather/conversion for formats without equal component sizes
-        if (IsOddFormat((SWR_FORMAT)ied.Format))
-        {
-#if USE_SIMD16_GATHERS
-            Value *pResults[4];
-            Value *pResults2[4];
-            CreateGatherOddFormats((SWR_FORMAT)ied.Format, vGatherMask,  
pStreamBase, vOffsets,  pResults);
-            CreateGatherOddFormats((SWR_FORMAT)ied.Format, vGatherMask2, 
pStreamBase, vOffsets2, pResults2);
-            ConvertFormat((SWR_FORMAT)ied.Format, pResults);
-            ConvertFormat((SWR_FORMAT)ied.Format, pResults2);
-
-            for (uint32_t c = 0; c < 4; c += 1)
-            {
-                if (isComponentEnabled(compMask, c))
-                {
-                    // pack adjacent pairs of SIMD8s into SIMD16s
-                    pVtxSrc2[currentVertexElement++] = JOIN_16(pResults[c], 
pResults2[c]);
-
-                    if (currentVertexElement > 3)
-                    {
-                        // store SIMD16s
-                        Value *pVtxOut2 = BITCAST(pVtxOut, 
PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
-
-                        StoreVertexElements16(pVtxOut2, outputElt++, 4, 
pVtxSrc2);
-                        // reset to the next vVertexElement to output
-                        currentVertexElement = 0;
-                    }
-                }
-            }
-#else
-            Value *pResults[4];
-            CreateGatherOddFormats((SWR_FORMAT)ied.Format, vGatherMask, 
pStreamBase, vOffsets, pResults);
-            ConvertFormat((SWR_FORMAT)ied.Format, pResults);
-
-            for (uint32_t c = 0; c < 4; c += 1)
-            {
-                if (isComponentEnabled(compMask, c))
-                {
-                    vVertexElements[currentVertexElement++] = pResults[c];
-                    if (currentVertexElement > 3)
-                    {
-                        StoreVertexElements(pVtxOut, outputElt++, 4, 
vVertexElements);
-                        // reset to the next vVertexElement to output
-                        currentVertexElement = 0;
-                    }
-                }
-            }
-#endif
-        }
-        else if(info.type[0] == SWR_TYPE_FLOAT)
-        {
-            ///@todo: support 64 bit vb accesses
-            Value *gatherSrc = VIMMED1(0.0f);
-#if USE_SIMD16_GATHERS
-            Value *gatherSrc16 = VIMMED1_16(0.0f);
-#endif
-
-            SWR_ASSERT(IsUniformFormat((SWR_FORMAT)ied.Format), 
-                "Unsupported format for standard gather fetch.");
-
-            // Gather components from memory to store in a simdvertex structure
-            switch (bpc)
-            {
-                case 16:
-                {
-#if USE_SIMD16_GATHERS
-                    Value *gatherResult[2];
-
-                    // if we have at least one component out of x or y to fetch
-                    if (isComponentEnabled(compMask, 0) || 
isComponentEnabled(compMask, 1))
-                    {
-                        gatherResult[0] = GATHERPS_16(gatherSrc16, 
pStreamBase, vOffsets16, vGatherMask16);
-
-                        // e.g. result of first 8x32bit integer gather for 
16bit components
-                        // 256i - 0    1    2    3    4    5    6    7
-                        //        xyxy xyxy xyxy xyxy xyxy xyxy xyxy xyxy
-                        //
-                    }
-                    else
-                    {
-                        gatherResult[0] = VUNDEF_I_16();
-                    }
-
-                    // if we have at least one component out of z or w to fetch
-                    if (isComponentEnabled(compMask, 2) || 
isComponentEnabled(compMask, 3))
-                    {
-                        // offset base to the next components(zw) in the 
vertex to gather
-                        pStreamBase = GEP(pStreamBase, C((char)4));
-
-                        gatherResult[1] = GATHERPS_16(gatherSrc16, 
pStreamBase, vOffsets16, vGatherMask16);
-
-                        // e.g. result of second 8x32bit integer gather for 
16bit components
-                        // 256i - 0    1    2    3    4    5    6    7
-                        //        zwzw zwzw zwzw zwzw zwzw zwzw zwzw zwzw 
-                        //
-                    }
-                    else
-                    {
-                        gatherResult[1] = VUNDEF_I_16();
-                    }
+        // Special gather/conversion for formats without equal component sizes
+        if (IsOddFormat((SWR_FORMAT)ied.Format))
+        {
+            Value *pResults[4];
+            CreateGatherOddFormats((SWR_FORMAT)ied.Format, vGatherMask, pStreamBase, vOffsets, pResults);
+            ConvertFormat((SWR_FORMAT)ied.Format, pResults);
 
-                    // if we have at least one component to shuffle into place
-                    if (compMask)
+            for (uint32_t c = 0; c < 4; c += 1)
+            {
+                if (isComponentEnabled(compMask, c))
+                {
+                    vVertexElements[currentVertexElement++] = pResults[c];
+                    if (currentVertexElement > 3)
                     {
-                        Value *pVtxOut2 = BITCAST(pVtxOut, 
PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
+                        StoreVertexElements(pVtxOut, outputElt++, 4, vVertexElements);
+                        // reset to the next vVertexElement to output
+                        currentVertexElement = 0;
+                    }
+                }
+            }
+        }
+        else if(info.type[0] == SWR_TYPE_FLOAT)
+        {
+            ///@todo: support 64 bit vb accesses
+            Value *gatherSrc = VIMMED1(0.0f);
 
-                        Shuffle16bpcArgs args = 
std::forward_as_tuple(gatherResult, pVtxOut2, Instruction::CastOps::FPExt, 
CONVERT_NONE,
-                            currentVertexElement, outputElt, compMask, 
compCtrl, pVtxSrc2);
+            SWR_ASSERT(IsUniformFormat((SWR_FORMAT)ied.Format), 
+                "Unsupported format for standard gather fetch.");
 
-                        // Shuffle gathered components into place in 
simdvertex struct
-                        Shuffle16bpcGather16(args);  // outputs to 
vVertexElements ref
-                    }
-#else
+            // Gather components from memory to store in a simdvertex structure
+            switch (bpc)
+            {
+                case 16:
+                {
                     Value *vGatherResult[2];
 
                     // if we have at least one component out of x or y to fetch
@@ -1313,49 +717,14 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
                             currentVertexElement, outputElt, compMask, 
compCtrl, vVertexElements);
 
                         // Shuffle gathered components into place in 
simdvertex struct
-#if USE_SIMD16_SHADERS
-                        Shuffle16bpcGather(args, useVertexID2);  // outputs to 
vVertexElements ref
-#else
-                        Shuffle16bpcGather(args);  // outputs to 
vVertexElements ref
-#endif
+                        mVWidth == 16 ? Shuffle16bpcGather16(args) : Shuffle16bpcGather(args);  // outputs to vVertexElements ref
                     }
-#endif
                 }
                     break;
                 case 32:
                 {
                     for (uint32_t i = 0; i < 4; i += 1)
                     {
-#if USE_SIMD16_GATHERS
-                        if (isComponentEnabled(compMask, i))
-                        {
-                            // if we need to gather the component
-                            if (compCtrl[i] == StoreSrc)
-                            {
-                                // Gather a SIMD of vertices
-                                // APIs allow a 4GB range for offsets
-                                // However, GATHERPS uses signed 32-bit 
offsets, so only a 2GB range :(
-                                // But, we know that elements must be aligned 
for FETCH. :)
-                                // Right shift the offset by a bit and then 
scale by 2 to remove the sign extension.
-                                Value *shiftedOffsets16 = LSHR(vOffsets16, 1);
-                                pVtxSrc2[currentVertexElement++] = 
GATHERPS_16(gatherSrc16, pStreamBaseGFX, shiftedOffsets16, vGatherMask16, 2, 
GFX_MEM_CLIENT_FETCH);
-                            }
-                            else
-                            {
-                                pVtxSrc2[currentVertexElement++] = 
GenerateCompCtrlVector16(compCtrl[i]);
-                            }
-
-                            if (currentVertexElement > 3)
-                            {
-                                // store SIMD16s
-                                Value *pVtxOut2 = BITCAST(pVtxOut, 
PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
-
-                                StoreVertexElements16(pVtxOut2, outputElt++, 
4, pVtxSrc2);
-                                // reset to the next vVertexElement to output
-                                currentVertexElement = 0;
-                            }
-                        }
-#else
                         if (isComponentEnabled(compMask, i))
                         {
                             // if we need to gather the component
@@ -1371,11 +740,7 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
                             }
                             else
                             {
-#if USE_SIMD16_SHADERS
-                                vVertexElements[currentVertexElement++] = 
GenerateCompCtrlVector(compCtrl[i], useVertexID2);
-#else
                                 vVertexElements[currentVertexElement++] = 
GenerateCompCtrlVector(compCtrl[i]);
-#endif
                             }
 
                             if (currentVertexElement > 3)
@@ -1385,7 +750,6 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
                                 currentVertexElement = 0;
                             }
                         }
-#endif
 
                         // offset base to the next component in the vertex to 
gather
                         pStreamBase = GEP(pStreamBase, C((char)4));
@@ -1397,59 +761,6 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
                 {
                     for (uint32_t i = 0; i < 4; i += 1)
                     {
-#if USE_SIMD16_GATHERS
-                        if (isComponentEnabled(compMask, i))
-                        {
-                            // if we need to gather the component
-                            if (compCtrl[i] == StoreSrc)
-                            {
-                                Value *vMaskLo  = VSHUFFLE(vGatherMask,  
VUNDEF(mInt1Ty, 8), C({ 0, 1, 2, 3 }));
-                                Value *vMaskLo2 = VSHUFFLE(vGatherMask2, 
VUNDEF(mInt1Ty, 8), C({ 0, 1, 2, 3 }));
-                                Value *vMaskHi  = VSHUFFLE(vGatherMask,  
VUNDEF(mInt1Ty, 8), C({ 4, 5, 6, 7 }));
-                                Value *vMaskHi2 = VSHUFFLE(vGatherMask2, 
VUNDEF(mInt1Ty, 8), C({ 4, 5, 6, 7 }));
-
-                                Value *vOffsetsLo  = VEXTRACTI128(vOffsets,  
C(0));
-                                Value *vOffsetsLo2 = VEXTRACTI128(vOffsets2, 
C(0));
-                                Value *vOffsetsHi  = VEXTRACTI128(vOffsets,  
C(1));
-                                Value *vOffsetsHi2 = VEXTRACTI128(vOffsets2, 
C(1));
-
-                                Value *vZeroDouble = VECTOR_SPLAT(4, 
ConstantFP::get(IRB()->getDoubleTy(), 0.0f));
-
-                                Value* pGatherLo  = GATHERPD(vZeroDouble, 
pStreamBase, vOffsetsLo,  vMaskLo);
-                                Value* pGatherLo2 = GATHERPD(vZeroDouble, 
pStreamBase, vOffsetsLo2, vMaskLo2);
-                                Value* pGatherHi  = GATHERPD(vZeroDouble, 
pStreamBase, vOffsetsHi,  vMaskHi);
-                                Value* pGatherHi2 = GATHERPD(vZeroDouble, 
pStreamBase, vOffsetsHi2, vMaskHi2);
-
-                                pGatherLo  = VCVTPD2PS(pGatherLo);
-                                pGatherLo2 = VCVTPD2PS(pGatherLo2);
-                                pGatherHi  = VCVTPD2PS(pGatherHi);
-                                pGatherHi2 = VCVTPD2PS(pGatherHi2);
-
-                                Value *pGather  = VSHUFFLE(pGatherLo,  
pGatherHi,  C({ 0, 1, 2, 3, 4, 5, 6, 7 }));
-                                Value *pGather2 = VSHUFFLE(pGatherLo2, 
pGatherHi2, C({ 0, 1, 2, 3, 4, 5, 6, 7 }));
-
-                                // pack adjacent pairs of SIMD8s into SIMD16s
-                                pVtxSrc2[currentVertexElement++] = 
JOIN_16(pGather, pGather2);
-                            }
-                            else
-                            {
-                                pVtxSrc2[currentVertexElement++] = 
GenerateCompCtrlVector16(compCtrl[i]);
-                            }
-
-                            if (currentVertexElement > 3)
-                            {
-                                // store SIMD16s
-                                Value *pVtxOut2 = BITCAST(pVtxOut, 
PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
-
-                                StoreVertexElements16(pVtxOut2, outputElt++, 
4, pVtxSrc2);
-                                // reset to the next vVertexElement to output
-                                currentVertexElement = 0;
-                            }
-                        }
-
-                        // offset base to the next component  in the vertex to 
gather
-                        pStreamBase = GEP(pStreamBase, C((char)8));
-#else
                         if (isComponentEnabled(compMask, i))
                         {
                             // if we need to gather the component
@@ -1475,11 +786,7 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
                             }
                             else
                             {
-#if USE_SIMD16_SHADERS
-                                vVertexElements[currentVertexElement++] = 
GenerateCompCtrlVector(compCtrl[i], useVertexID2);
-#else
                                 vVertexElements[currentVertexElement++] = 
GenerateCompCtrlVector(compCtrl[i]);
-#endif
                             }
 
                             if (currentVertexElement > 3)
@@ -1492,7 +799,6 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
 
                         // offset base to the next component  in the vertex to 
gather
                         pStreamBase = GEP(pStreamBase, C((char)8));
-#endif
                     }
                 }
                     break;
@@ -1539,9 +845,6 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
 
             // value substituted when component of gather is masked
             Value* gatherSrc = VIMMED1(0);
-#if USE_SIMD16_GATHERS
-            Value *gatherSrc16 = VIMMED1_16(0);
-#endif
 
             // Gather components from memory to store in a simdvertex structure
             switch (bpc)
@@ -1551,21 +854,6 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
                     // if we have at least one component to fetch
                     if (compMask)
                     {
-#if USE_SIMD16_GATHERS
-                        Value *gatherResult = GATHERDD_16(gatherSrc16, 
pStreamBase, vOffsets16, vGatherMask16);
-
-                        // e.g. result of an 8x32bit integer gather for 8bit 
components
-                        // 256i - 0    1    2    3    4    5    6    7
-                        //        xyzw xyzw xyzw xyzw xyzw xyzw xyzw xyzw 
-
-                        Value *pVtxOut2 = BITCAST(pVtxOut, 
PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
-
-                        Shuffle8bpcArgs args = 
std::forward_as_tuple(gatherResult, pVtxOut2, extendCastType, conversionType,
-                            currentVertexElement, outputElt, compMask, 
compCtrl, pVtxSrc2, info.swizzle);
-
-                        // Shuffle gathered components into place in 
simdvertex struct
-                        Shuffle8bpcGatherd16(args);  // outputs to 
vVertexElements ref
-#else
                         Value *vGatherResult = GATHERDD(gatherSrc, 
pStreamBase, vOffsets, vGatherMask);
                         // e.g. result of an 8x32bit integer gather for 8bit 
components
                         // 256i - 0    1    2    3    4    5    6    7
@@ -1575,65 +863,12 @@ void FetchJit::JitGatherVertices(const 
FETCH_COMPILE_STATE &fetchState,
                             currentVertexElement, outputElt, compMask, 
compCtrl, vVertexElements, info.swizzle);
 
                         // Shuffle gathered components into place in 
simdvertex struct
-#if USE_SIMD16_SHADERS
-                        Shuffle8bpcGatherd(args, useVertexID2); // outputs to 
vVertexElements ref
-#else
-                        Shuffle8bpcGatherd(args); // outputs to 
vVertexElements ref
-#endif
-#endif
+                        mVWidth == 16 ? Shuffle8bpcGatherd16(args) : 
Shuffle8bpcGatherd(args); // outputs to vVertexElements ref
                     }
                 }
                 break;
                 case 16:
                 {
-#if USE_SIMD16_GATHERS
-                    Value *gatherResult[2];
-
-                    // if we have at least one component out of x or y to fetch
-                    if (isComponentEnabled(compMask, 0) || isComponentEnabled(compMask, 1))
-                    {
-                        gatherResult[0] = GATHERDD_16(gatherSrc16, pStreamBase, vOffsets16, vGatherMask16);
-
-                        // e.g. result of first 8x32bit integer gather for 16bit components
-                        // 256i - 0    1    2    3    4    5    6    7
-                        //        xyxy xyxy xyxy xyxy xyxy xyxy xyxy xyxy
-                        //
-                    }
-                    else
-                    {
-                        gatherResult[0] = VUNDEF_I_16();
-                    }
-
-                    // if we have at least one component out of z or w to fetch
-                    if (isComponentEnabled(compMask, 2) || isComponentEnabled(compMask, 3))
-                    {
-                        // offset base to the next components(zw) in the vertex to gather
-                        pStreamBase = GEP(pStreamBase, C((char)4));
-
-                        gatherResult[1] = GATHERDD_16(gatherSrc16, pStreamBase, vOffsets16, vGatherMask16);
-
-                        // e.g. result of second 8x32bit integer gather for 16bit components
-                        // 256i - 0    1    2    3    4    5    6    7
-                        //        zwzw zwzw zwzw zwzw zwzw zwzw zwzw zwzw 
-                        //
-                    }
-                    else
-                    {
-                        gatherResult[1] = VUNDEF_I_16();
-                    }
-
-                    // if we have at least one component to shuffle into place
-                    if (compMask)
-                    {
-                        Value *pVtxOut2 = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
-
-                        Shuffle16bpcArgs args = std::forward_as_tuple(gatherResult, pVtxOut2, extendCastType, conversionType,
-                            currentVertexElement, outputElt, compMask, compCtrl, pVtxSrc2);
-
-                        // Shuffle gathered components into place in simdvertex struct
-                        Shuffle16bpcGather16(args);  // outputs to vVertexElements ref
-                    }
-#else
                     Value *vGatherResult[2];
 
                     // if we have at least one component out of x or y to fetch
@@ -1666,13 +901,8 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
                             currentVertexElement, outputElt, compMask, compCtrl, vVertexElements);
 
                         // Shuffle gathered components into place in simdvertex struct
-#if USE_SIMD16_SHADERS
-                        Shuffle16bpcGather(args, useVertexID2);  // outputs to vVertexElements ref
-#else
-                        Shuffle16bpcGather(args);  // outputs to vVertexElements ref
-#endif
+                        mVWidth == 16 ? Shuffle16bpcGather16(args) : Shuffle16bpcGather(args); // outputs to vVertexElements ref
                     }
-#endif
                 }
                 break;
                 case 32:
@@ -1685,28 +915,6 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
                             // if we need to gather the component
                             if (compCtrl[i] == StoreSrc)
                             {
-#if USE_SIMD16_GATHERS
-                                Value *pGather = GATHERDD_16(gatherSrc16, pStreamBase, vOffsets16, vGatherMask16);
-
-                                if (conversionType == CONVERT_USCALED)
-                                {
-                                    pGather = UI_TO_FP(pGather, mSimd16FP32Ty);
-                                }
-                                else if (conversionType == CONVERT_SSCALED)
-                                {
-                                    pGather = SI_TO_FP(pGather, mSimd16FP32Ty);
-                                }
-                                else if (conversionType == CONVERT_SFIXED)
-                                {
-                                    pGather = FMUL(SI_TO_FP(pGather, mSimd16FP32Ty), VBROADCAST_16(C(1 / 65536.0f)));
-                                }
-
-                                pVtxSrc2[currentVertexElement++] = pGather;
-
-                                // e.g. result of a single 8x32bit integer gather for 32bit components
-                                // 256i - 0    1    2    3    4    5    6    7
-                                //        xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx 
-#else
                                 Value* pGather = GATHERDD(gatherSrc, pStreamBase, vOffsets, vGatherMask);
 
                                 if (conversionType == CONVERT_USCALED)
@@ -1727,31 +935,15 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
                                 // e.g. result of a single 8x32bit integer gather for 32bit components
                                 // 256i - 0    1    2    3    4    5    6    7
                                 //        xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx 
-#endif
                             }
                             else
                             {
-#if USE_SIMD16_GATHERS
-                                pVtxSrc2[currentVertexElement++] = GenerateCompCtrlVector16(compCtrl[i]);
-#else
-#if USE_SIMD16_SHADERS
-                                vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i], useVertexID2);
-#else
                                 vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
-#endif
-#endif
                             }
 
                             if (currentVertexElement > 3)
                             {
-#if USE_SIMD16_GATHERS
-                                // store SIMD16s
-                                Value *pVtxOut2 = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
-
-                                StoreVertexElements16(pVtxOut2, outputElt++, 4, pVtxSrc2);
-#else
                                 StoreVertexElements(pVtxOut, outputElt++, 4, vVertexElements);
-#endif
 
                                 // reset to the next vVertexElement to output
                                 currentVertexElement = 0;
@@ -1771,14 +963,7 @@ void FetchJit::JitGatherVertices(const FETCH_COMPILE_STATE &fetchState,
     // if we have a partially filled vVertexElement struct, output it
    if (currentVertexElement > 0)
     {
-#if USE_SIMD16_GATHERS
-        // store SIMD16s
-        Value *pVtxOut2 = BITCAST(pVtxOut, PointerType::get(VectorType::get(mFP32Ty, mVWidth16), 0));
-
-        StoreVertexElements16(pVtxOut2, outputElt++, currentVertexElement, pVtxSrc2);
-#else
         StoreVertexElements(pVtxOut, outputElt++, currentVertexElement, vVertexElements);
-#endif
     }
 }
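
With the compile-time USE_SIMD16_GATHERS/USE_SIMD16_SHADERS forks removed, the width decision is made once per jitted fetch shader from the builder's mVWidth; each shuffle/store site above reduces to the same runtime dispatch, sketched here in expanded form:

    // Runtime selection between the 8-wide and 16-wide shuffle paths.
    // The 16-wide variants still double-pump SIMD8 operations internally
    // for now, so both paths share the same Builder helpers.
    if (mVWidth == 16)
    {
        Shuffle8bpcGatherd16(args);  // SIMD16 target
    }
    else
    {
        Shuffle8bpcGatherd(args);    // SIMD8 target
    }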
 
@@ -1869,8 +1054,15 @@ Value* FetchJit::GetSimdValid32bitIndices(Value* pIndices, Value* pLastIndex)
     numIndicesLeft = SDIV(numIndicesLeft, C(4));
 
     // create a vector of index counts from the base index ptr passed into the fetch
-    const std::vector<Constant*> vecIndices {C(0), C(1), C(2), C(3), C(4), C(5), C(6), C(7)};
-    Constant* vIndexOffsets = ConstantVector::get(vecIndices);
+    Constant* vIndexOffsets;
+    if (mVWidth == 8)
+    {
+        vIndexOffsets = C({ 0, 1, 2, 3, 4, 5, 6, 7 });
+    }
+    else
+    {
+        vIndexOffsets = C({ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 });
+    }
 
     // compare index count to the max valid index
     // e.g vMaxIndex      4 4 4 4 4 4 4 4 : 4 indices left to load
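
The two open-coded constants are just the lane indices 0..mVWidth-1; an equivalent loop-built form, assuming the Builder's C() helper and llvm::SmallVector are available in this scope, would be:

    // Build { 0, 1, ..., mVWidth-1 } for either an 8- or 16-wide target.
    SmallVector<Constant*, 16> vecIndices;
    for (uint32_t i = 0; i < mVWidth; ++i)
    {
        vecIndices.push_back(C((int)i));
    }
    Constant* vIndexOffsets = ConstantVector::get(vecIndices);
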
@@ -1901,7 +1093,6 @@ Value* FetchJit::GetSimdValid32bitIndices(Value* pIndices, Value* pLastIndex)
 ///   @param compCtrl - component control val
 ///   @param vVertexElements[4] - vertex components to output
 ///   @param swizzle[4] - component swizzle location
-#if USE_SIMD16_GATHERS
 void FetchJit::Shuffle8bpcGatherd16(Shuffle8bpcArgs &args)
 {
     // Unpack tuple args
@@ -1917,14 +1108,14 @@ void FetchJit::Shuffle8bpcGatherd16(Shuffle8bpcArgs &args)
     const uint32_t(&swizzle)[4] = std::get<9>(args);
 
     // cast types
-    Type *vGatherTy = mSimdInt32Ty;
-    Type *v32x8Ty = VectorType::get(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits
+    Type *vGatherTy = VectorType::get(mInt32Ty, 8);
+    Type *v32x8Ty = VectorType::get(mInt8Ty, 32);
 
     // have to do extra work for sign extending
     if ((extendType == Instruction::CastOps::SExt) || (extendType == Instruction::CastOps::SIToFP))
     {
-        Type *v16x8Ty = VectorType::get(mInt8Ty, mVWidth * 2); // 8x16bit ints in a 128bit lane
-        Type *v128Ty = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128), mVWidth / 4); // vwidth is units of 32 bits
+        Type *v16x8Ty = VectorType::get(mInt8Ty, 16); // 8x16bit ints in a 128bit lane
+        Type *v128Ty = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128), 2);
 
         // shuffle mask, including any swizzling
         const char x = (char)swizzle[0]; const char y = (char)swizzle[1];
@@ -1954,8 +1145,8 @@ void FetchJit::Shuffle8bpcGatherd16(Shuffle8bpcArgs &args)
         Value *vi128XY_hi = nullptr;
         if (isComponentEnabled(compMask, 0) || isComponentEnabled(compMask, 1))
         {
-            vi128XY_lo = BITCAST(VPERMD(vShufResult_lo, C<int32_t>({ 0, 4, 0, 0, 1, 5, 0, 0 })), v128Ty);
-            vi128XY_hi = BITCAST(VPERMD(vShufResult_hi, C<int32_t>({ 0, 4, 0, 0, 1, 5, 0, 0 })), v128Ty);
+            vi128XY_lo = BITCAST(VSHUFFLE(vShufResult_lo, vShufResult_lo, C<int32_t>({ 0, 4, 0, 0, 1, 5, 0, 0 })), v128Ty);
+            vi128XY_hi = BITCAST(VSHUFFLE(vShufResult_hi, vShufResult_hi, C<int32_t>({ 0, 4, 0, 0, 1, 5, 0, 0 })), v128Ty);
 
             // after PERMD: move and pack xy and zw components in low 64 bits of each 128bit lane
             // 256i - 0    1    2    3    4    5    6    7
@@ -1967,8 +1158,8 @@ void FetchJit::Shuffle8bpcGatherd16(Shuffle8bpcArgs &args)
         Value *vi128ZW_hi = nullptr;
         if (isComponentEnabled(compMask, 2) || isComponentEnabled(compMask, 3))
         {
-            vi128ZW_lo = BITCAST(VPERMD(vShufResult_lo, C<int32_t>({ 2, 6, 0, 0, 3, 7, 0, 0 })), v128Ty);
-            vi128ZW_hi = BITCAST(VPERMD(vShufResult_hi, C<int32_t>({ 2, 6, 0, 0, 3, 7, 0, 0 })), v128Ty);
+            vi128ZW_lo = BITCAST(VSHUFFLE(vShufResult_lo, vShufResult_lo, C<int32_t>({ 2, 6, 0, 0, 3, 7, 0, 0 })), v128Ty);
+            vi128ZW_hi = BITCAST(VSHUFFLE(vShufResult_hi, vShufResult_hi, C<int32_t>({ 2, 6, 0, 0, 3, 7, 0, 0 })), v128Ty);
         }
 
         // init denormalize variables if needed
@@ -2012,25 +1203,26 @@ void FetchJit::Shuffle8bpcGatherd16(Shuffle8bpcArgs &args)
                     Value *temp_lo = PMOVSXBD(BITCAST(VEXTRACT(selectedPermute_lo, C(lane)), v16x8Ty));
                     Value *temp_hi = PMOVSXBD(BITCAST(VEXTRACT(selectedPermute_hi, C(lane)), v16x8Ty));
 
+                    Value* temp = JOIN_16(temp_lo, temp_hi);
+
                     // denormalize if needed
                     if (conversionType != CONVERT_NONE)
                     {
-                        temp_lo = FMUL(CAST(fpCast, temp_lo, mSimdFP32Ty), conversionFactor);
-                        temp_hi = FMUL(CAST(fpCast, temp_hi, mSimdFP32Ty), conversionFactor);
+                        temp = FMUL(CAST(fpCast, temp, mSimdFP32Ty), conversionFactor);
                     }
 
-                    vVertexElements[currentVertexElement] = JOIN_16(temp_lo, temp_hi);
+                    vVertexElements[currentVertexElement] = temp;
 
                     currentVertexElement += 1;
                 }
                 else
                 {
-                    vVertexElements[currentVertexElement++] = GenerateCompCtrlVector16(compCtrl[i]);
+                    vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
                 }
 
                 if (currentVertexElement > 3)
                 {
-                    StoreVertexElements16(pVtxOut, outputElt++, 4, vVertexElements);
+                    StoreVertexElements(pVtxOut, outputElt++, 4, vVertexElements);
                     // reset to the next vVertexElement to output
                     currentVertexElement = 0;
                 }
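
Hoisting JOIN_16 above the denormalize step is what lets the two per-half multiplies collapse into one: mSimdFP32Ty is the full target-width type (16-wide when mVWidth == 16), so a single FMUL covers both halves. The shape of the refactor in isolation, using the names from the hunk above:

    // Combine the two SIMD8 halves first, then denormalize once at target width.
    Value* temp = JOIN_16(temp_lo, temp_hi);
    if (conversionType != CONVERT_NONE)
    {
        temp = FMUL(CAST(fpCast, temp, mSimdFP32Ty), conversionFactor);
    }
    vVertexElements[currentVertexElement] = temp;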
@@ -2110,25 +1302,26 @@ void FetchJit::Shuffle8bpcGatherd16(Shuffle8bpcArgs &args)
                     // 256i - 0    1    2    3    4    5    6    7
                     //        x000 x000 x000 x000 x000 x000 x000 x000 
 
+                    Value* temp = JOIN_16(temp_lo, temp_hi);
+
                     // denormalize if needed
                     if (conversionType != CONVERT_NONE)
                     {
-                        temp_lo = FMUL(CAST(fpCast, temp_lo, mSimdFP32Ty), conversionFactor);
-                        temp_hi = FMUL(CAST(fpCast, temp_hi, mSimdFP32Ty), conversionFactor);
+                        temp = FMUL(CAST(fpCast, temp, mSimdFP32Ty), conversionFactor);
                     }
 
-                    vVertexElements[currentVertexElement] = JOIN_16(temp_lo, temp_hi);
+                    vVertexElements[currentVertexElement] = temp;
 
                     currentVertexElement += 1;
                 }
                 else
                 {
-                    vVertexElements[currentVertexElement++] = GenerateCompCtrlVector16(compCtrl[i]);
+                    vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
                 }
 
                 if (currentVertexElement > 3)
                 {
-                    StoreVertexElements16(pVtxOut, outputElt++, 4, vVertexElements);
+                    StoreVertexElements(pVtxOut, outputElt++, 4, vVertexElements);
                     // reset to the next vVertexElement to output
                     currentVertexElement = 0;
                 }
@@ -2141,12 +1334,7 @@ void FetchJit::Shuffle8bpcGatherd16(Shuffle8bpcArgs &args)
     }
 }
 
-#else
-#if USE_SIMD16_SHADERS
-void FetchJit::Shuffle8bpcGatherd(Shuffle8bpcArgs &args, bool useVertexID2)
-#else
 void FetchJit::Shuffle8bpcGatherd(Shuffle8bpcArgs &args)
-#endif
 {
     // Unpack tuple args
     Value*& vGatherResult = std::get<0>(args);
@@ -2228,11 +1416,7 @@ void FetchJit::Shuffle8bpcGatherd(Shuffle8bpcArgs &args)
         }
         else
         {
-#if USE_SIMD16_SHADERS
-            vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i], useVertexID2);
-#else
             vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
-#endif
         }
 
         if (currentVertexElement > 3)
@@ -2244,7 +1428,6 @@ void FetchJit::Shuffle8bpcGatherd(Shuffle8bpcArgs &args)
     }
 }
 
-#endif
 //////////////////////////////////////////////////////////////////////////
 /// @brief Takes a SIMD of gathered 16bpc verts, zero or sign extends, 
 /// denormalizes if needed, converts to F32 if needed, and positions in 
@@ -2259,7 +1442,6 @@ void FetchJit::Shuffle8bpcGatherd(Shuffle8bpcArgs &args)
 ///   @param compMask - component packing mask
 ///   @param compCtrl - component control val
 ///   @param vVertexElements[4] - vertex components to output
-#if USE_SIMD16_GATHERS
 void FetchJit::Shuffle16bpcGather16(Shuffle16bpcArgs &args)
 {
     // Unpack tuple args
@@ -2274,8 +1456,8 @@ void FetchJit::Shuffle16bpcGather16(Shuffle16bpcArgs &args)
     Value* (&vVertexElements)[4] = std::get<8>(args);
 
     // cast types
-    Type *vGatherTy = VectorType::get(IntegerType::getInt32Ty(JM()->mContext), mVWidth);
-    Type *v32x8Ty = VectorType::get(mInt8Ty, mVWidth * 4); // vwidth is units of 32 bits
+    Type *vGatherTy = VectorType::get(mInt32Ty, 8);
+    Type *v32x8Ty = VectorType::get(mInt8Ty, 32);
 
     // have to do extra work for sign extending
     if ((extendType == Instruction::CastOps::SExt) || (extendType == Instruction::CastOps::SIToFP) || (extendType == Instruction::CastOps::FPExt))
@@ -2284,29 +1466,29 @@ void FetchJit::Shuffle16bpcGather16(Shuffle16bpcArgs &args)
         bool bFP = (extendType == Instruction::CastOps::FPExt) ? true : false;
 
         Type *v8x16Ty = VectorType::get(mInt16Ty, 8); // 8x16bit in a 128bit lane
-        Type *v128bitTy = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128), mVWidth / 4); // vwidth is units of 32 bits
+        Type *v128bitTy = VectorType::get(IntegerType::getIntNTy(JM()->mContext, 128), 2);
 
         // shuffle mask
-        Value *vConstMask = C<char>({ 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15,
-                                      0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 });
+        Value *vConstMask = C<uint8_t>({ 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15,
+                                          0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15 });
         Value *vi128XY_lo = nullptr;
         Value *vi128XY_hi = nullptr;
         if (isComponentEnabled(compMask, 0) || isComponentEnabled(compMask, 1))
         {
             // SIMD16 PSHUFB isnt part of AVX-512F, so split into SIMD8 for the sake of KNL, for now..
 
-            Value *vGatherResult_lo = EXTRACT_16(vGatherResult[0], 0);
-            Value *vGatherResult_hi = EXTRACT_16(vGatherResult[0], 1);
+            Value *vGatherResult_lo = BITCAST(EXTRACT_16(vGatherResult[0], 0), v32x8Ty);
+            Value *vGatherResult_hi = BITCAST(EXTRACT_16(vGatherResult[0], 1), v32x8Ty);
 
-            Value *vShufResult_lo = BITCAST(PSHUFB(BITCAST(vGatherResult_lo, v32x8Ty), vConstMask), vGatherTy);
-            Value *vShufResult_hi = BITCAST(PSHUFB(BITCAST(vGatherResult_hi, v32x8Ty), vConstMask), vGatherTy);
+            Value *vShufResult_lo = BITCAST(PSHUFB(vGatherResult_lo, vConstMask), vGatherTy);
+            Value *vShufResult_hi = BITCAST(PSHUFB(vGatherResult_hi, vConstMask), vGatherTy);
 
             // after pshufb: group components together in each 128bit lane
             // 256i - 0    1    2    3    4    5    6    7
             //        xxxx xxxx yyyy yyyy xxxx xxxx yyyy yyyy
 
-            vi128XY_lo = BITCAST(VPERMD(vShufResult_lo, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
-            vi128XY_hi = BITCAST(VPERMD(vShufResult_hi, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
+            vi128XY_lo = BITCAST(VSHUFFLE(vShufResult_lo, vShufResult_lo, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
+            vi128XY_hi = BITCAST(VSHUFFLE(vShufResult_hi, vShufResult_hi, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
 
             // after PERMD: move and pack xy components into each 128bit lane
             // 256i - 0    1    2    3    4    5    6    7
@@ -2318,14 +1500,14 @@ void FetchJit::Shuffle16bpcGather16(Shuffle16bpcArgs &args)
         Value *vi128ZW_hi = nullptr;
         if (isComponentEnabled(compMask, 2) || isComponentEnabled(compMask, 3))
         {
-            Value *vGatherResult_lo = EXTRACT_16(vGatherResult[1], 0);
-            Value *vGatherResult_hi = EXTRACT_16(vGatherResult[1], 1);
+            Value *vGatherResult_lo = BITCAST(EXTRACT_16(vGatherResult[1], 0), v32x8Ty);
+            Value *vGatherResult_hi = BITCAST(EXTRACT_16(vGatherResult[1], 1), v32x8Ty);
 
-            Value *vShufResult_lo = BITCAST(PSHUFB(BITCAST(vGatherResult_lo, v32x8Ty), vConstMask), vGatherTy);
-            Value *vShufResult_hi = BITCAST(PSHUFB(BITCAST(vGatherResult_hi, v32x8Ty), vConstMask), vGatherTy);
+            Value *vShufResult_lo = BITCAST(PSHUFB(vGatherResult_lo, vConstMask), vGatherTy);
+            Value *vShufResult_hi = BITCAST(PSHUFB(vGatherResult_hi, vConstMask), vGatherTy);
 
-            vi128ZW_lo = BITCAST(VPERMD(vShufResult_lo, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
-            vi128ZW_hi = BITCAST(VPERMD(vShufResult_hi, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
+            vi128ZW_lo = BITCAST(VSHUFFLE(vShufResult_lo, vShufResult_lo, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
+            vi128ZW_hi = BITCAST(VSHUFFLE(vShufResult_hi, vShufResult_hi, C<int32_t>({ 0, 1, 4, 5, 2, 3, 6, 7 })), v128bitTy);
         }
 
         // init denormalize variables if needed
@@ -2368,8 +1550,10 @@ void FetchJit::Shuffle16bpcGather16(Shuffle16bpcArgs &args)
                     if (bFP)
                     {
                         // extract 128 bit lanes to sign extend each component
-                        Value *temp_lo = CVTPH2PS(BITCAST(VEXTRACT(selectedPermute_lo, C(lane)), v8x16Ty));
-                        Value *temp_hi = CVTPH2PS(BITCAST(VEXTRACT(selectedPermute_hi, C(lane)), v8x16Ty));
+                        /// @todo Force 8-wide cvt until we support generic cvt in x86 lowering pass
+                        Function* pCvtPh2Ps = Intrinsic::getDeclaration(JM()->mpCurrentModule, Intrinsic::x86_vcvtph2ps_256);
+                        Value *temp_lo = CALL(pCvtPh2Ps, BITCAST(VEXTRACT(selectedPermute_lo, C(lane)), v8x16Ty));
+                        Value *temp_hi = CALL(pCvtPh2Ps, BITCAST(VEXTRACT(selectedPermute_hi, C(lane)), v8x16Ty));
 
                         vVertexElements[currentVertexElement] = JOIN_16(temp_lo, temp_hi);
                     }
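
The FPExt path pins the conversion to the 8-wide AVX intrinsic because the generic CVTPH2PS helper isn't lowered for a 16-wide target yet: each extracted 128-bit lane holds 8 fp16 values, vcvtph2ps.256 widens one lane to 8 floats, and the two halves are rejoined. The same three steps, pulled out as a sketch:

    // Forced 8-wide half-to-float conversion, per 128-bit half.
    Function* pCvtPh2Ps = Intrinsic::getDeclaration(JM()->mpCurrentModule,
                                                    Intrinsic::x86_vcvtph2ps_256);
    Value* lo = CALL(pCvtPh2Ps, BITCAST(VEXTRACT(selectedPermute_lo, C(lane)), v8x16Ty));
    Value* hi = CALL(pCvtPh2Ps, BITCAST(VEXTRACT(selectedPermute_hi, C(lane)), v8x16Ty));
    Value* result = JOIN_16(lo, hi);  // 16 floats at target width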
@@ -2379,26 +1563,27 @@ void FetchJit::Shuffle16bpcGather16(Shuffle16bpcArgs &args)
                         Value *temp_lo = PMOVSXWD(BITCAST(VEXTRACT(selectedPermute_lo, C(lane)), v8x16Ty));
                         Value *temp_hi = PMOVSXWD(BITCAST(VEXTRACT(selectedPermute_hi, C(lane)), v8x16Ty));
 
+                        Value* temp = JOIN_16(temp_lo, temp_hi);
+
                         // denormalize if needed
                         if (conversionType != CONVERT_NONE)
                         {
-                            temp_lo = FMUL(CAST(IntToFpCast, temp_lo, mSimdFP32Ty), conversionFactor);
-                            temp_hi = FMUL(CAST(IntToFpCast, temp_hi, mSimdFP32Ty), conversionFactor);
+                            temp = FMUL(CAST(IntToFpCast, temp, mSimdFP32Ty), conversionFactor);
                         }
 
-                        vVertexElements[currentVertexElement] = JOIN_16(temp_lo, temp_hi);
+                        vVertexElements[currentVertexElement] = temp;
                     }
 
                     currentVertexElement += 1;
                 }
                 else
                 {
-                    vVertexElements[currentVertexElement++] = GenerateCompCtrlVector16(compCtrl[i]);
+                    vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
                 }
 
                 if (currentVertexElement > 3)
                 {
-                    StoreVertexElements16(pVtxOut, outputElt++, 4, vVertexElements);
+                    StoreVertexElements(pVtxOut, outputElt++, 4, vVertexElements);
                     // reset to the next vVertexElement to output
                     currentVertexElement = 0;
                 }
@@ -2473,25 +1658,26 @@ void FetchJit::Shuffle16bpcGather16(Shuffle16bpcArgs &args)
                     // 256i - 0    1    2    3    4    5    6    7
                     //        xx00 xx00 xx00 xx00 xx00 xx00 xx00 xx00 
 
+                    Value* temp = JOIN_16(temp_lo, temp_hi);
+
                     // denormalize if needed
                     if (conversionType != CONVERT_NONE)
                     {
-                        temp_lo = FMUL(CAST(fpCast, temp_lo, mSimdFP32Ty), conversionFactor);
-                        temp_hi = FMUL(CAST(fpCast, temp_hi, mSimdFP32Ty), conversionFactor);
+                        temp = FMUL(CAST(fpCast, temp, mSimdFP32Ty), conversionFactor);
                     }
 
-                    vVertexElements[currentVertexElement] = JOIN_16(temp_lo, temp_hi);
+                    vVertexElements[currentVertexElement] = temp;
 
                     currentVertexElement += 1;
                 }
                 else
                 {
-                    vVertexElements[currentVertexElement++] = GenerateCompCtrlVector16(compCtrl[i]);
+                    vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
                 }
 
                 if (currentVertexElement > 3)
                 {
-                    StoreVertexElements16(pVtxOut, outputElt++, 4, vVertexElements);
+                    StoreVertexElements(pVtxOut, outputElt++, 4, vVertexElements);
                     // reset to the next vVertexElement to output
                     currentVertexElement = 0;
                 }
@@ -2504,12 +1690,7 @@ void FetchJit::Shuffle16bpcGather16(Shuffle16bpcArgs &args)
     }
 }
 
-#else
-#if USE_SIMD16_SHADERS
-void FetchJit::Shuffle16bpcGather(Shuffle16bpcArgs &args, bool useVertexID2)
-#else
 void FetchJit::Shuffle16bpcGather(Shuffle16bpcArgs &args)
-#endif
 {
     // Unpack tuple args
     Value* (&vGatherResult)[2] = std::get<0>(args);
@@ -2612,11 +1793,7 @@ void FetchJit::Shuffle16bpcGather(Shuffle16bpcArgs &args)
                 }
                 else
                 {
-#if USE_SIMD16_SHADERS
-                    vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i], useVertexID2);
-#else
                     vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
-#endif
                 }
 
                 if (currentVertexElement > 3)
@@ -2695,11 +1872,7 @@ void FetchJit::Shuffle16bpcGather(Shuffle16bpcArgs &args)
                 }
                 else
                 {
-#if USE_SIMD16_SHADERS
-                    vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i], useVertexID2);
-#else
                     vVertexElements[currentVertexElement++] = GenerateCompCtrlVector(compCtrl[i]);
-#endif
                 }
 
                 if (currentVertexElement > 3)
@@ -2717,42 +1890,12 @@ void FetchJit::Shuffle16bpcGather(Shuffle16bpcArgs &args)
     }
 }
 
-#endif
 //////////////////////////////////////////////////////////////////////////
 /// @brief Output a simdvertex worth of elements to the current outputElt
 /// @param pVtxOut - base address of VIN output struct
 /// @param outputElt - simdvertex offset in VIN to write to
 /// @param numEltsToStore - number of simdvertex rows to write out
 /// @param vVertexElements - LLVM Value*[] simdvertex to write out
-#if USE_SIMD16_GATHERS
-void FetchJit::StoreVertexElements16(Value* pVtxOut, const uint32_t outputElt, const uint32_t numEltsToStore, Value* (&vVertexElements)[4])
-{
-    SWR_ASSERT(numEltsToStore <= 4, "Invalid element count.");
-
-    for (uint32_t c = 0; c < numEltsToStore; ++c)
-    {
-        // STORE expects FP32 x vWidth type, just bitcast if needed
-        if (!vVertexElements[c]->getType()->getScalarType()->isFloatTy())
-        {
-#if FETCH_DUMP_VERTEX
-            PRINT("vVertexElements[%d]: 0x%x\n", { C(c), vVertexElements[c] });
-#endif
-            vVertexElements[c] = BITCAST(vVertexElements[c], mSimd16FP32Ty);
-        }
-#if FETCH_DUMP_VERTEX
-        else
-        {
-            PRINT("vVertexElements[%d]: %f\n", { C(c), vVertexElements[c] });
-        }
-#endif
-        // outputElt * 4 = offsetting by the size of a simdvertex
-        // + c offsets to a 32bit x vWidth row within the current vertex
-        Value* dest = GEP(pVtxOut, C(outputElt * 4 + c), "destGEP");
-        STORE(vVertexElements[c], dest);
-    }
-}
-
-#else
 void FetchJit::StoreVertexElements(Value* pVtxOut, const uint32_t outputElt, const uint32_t numEltsToStore, Value* (&vVertexElements)[4])
 {
     SWR_ASSERT(numEltsToStore <= 4, "Invalid element count.");
@@ -2775,62 +1918,16 @@ void FetchJit::StoreVertexElements(Value* pVtxOut, const uint32_t outputElt, con
 #endif
         // outputElt * 4 = offsetting by the size of a simdvertex
         // + c offsets to a 32bit x vWidth row within the current vertex
-#if USE_SIMD16_SHADERS
-        Value* dest = GEP(pVtxOut, C(outputElt * 8 + c * 2), "destGEP");
-#else
         Value* dest = GEP(pVtxOut, C(outputElt * 4 + c), "destGEP");
-#endif
         STORE(vVertexElements[c], dest);
     }
 }
 
-#endif
 //////////////////////////////////////////////////////////////////////////
 /// @brief Generates a constant vector of values based on the 
 /// ComponentControl value
 /// @param ctrl - ComponentControl value
-#if USE_SIMD16_GATHERS
-Value *FetchJit::GenerateCompCtrlVector16(const ComponentControl ctrl)
-{
-    switch (ctrl)
-    {
-        case NoStore:   
-            return VUNDEF_I_16();
-        case Store0:    
-            return VIMMED1_16(0);
-        case Store1Fp:  
-            return VIMMED1_16(1.0f);
-        case Store1Int: 
-            return VIMMED1_16(1);
-        case StoreVertexId:
-        {
-            Value *pId_lo = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID  })), mSimdFP32Ty);
-            Value *pId_hi = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID2 })), mSimdFP32Ty);
-
-            Value *pId = JOIN_16(pId_lo, pId_hi);
-
-            return pId;
-        }
-        case StoreInstanceId:
-        {
-            Value *pId = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_CurInstance })), mFP32Ty);
-            return VBROADCAST_16(pId);
-        }
-
-
-        case StoreSrc:
-        default:
-            SWR_INVALID("Invalid component control");
-            return VUNDEF_I_16();
-    }
-}
-
-#else
-#if USE_SIMD16_SHADERS
-Value *FetchJit::GenerateCompCtrlVector(const ComponentControl ctrl, bool useVertexID2)
-#else
 Value *FetchJit::GenerateCompCtrlVector(const ComponentControl ctrl)
-#endif
 {
     switch (ctrl)
     {
@@ -2843,22 +1940,19 @@ Value *FetchJit::GenerateCompCtrlVector(const ComponentControl ctrl)
     case Store1Int:
         return VIMMED1(1);
     case StoreVertexId:
+    {
+        if (mVWidth == 16)
         {
-#if USE_SIMD16_SHADERS
-            Value *pId;
-            if (useVertexID2)
-            {
-                pId = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID2 })), mSimdFP32Ty);
-            }
-            else
-            {
-                pId = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID })), mSimdFP32Ty);
-            }
-#else
-            Value *pId = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID })), mSimdFP32Ty);
-#endif
-            return pId;
+            Type* pSimd8FPTy = VectorType::get(mFP32Ty, 8);
+            Value *pIdLo = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID })), pSimd8FPTy);
+            Value *pIdHi = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID2 })), pSimd8FPTy);
+            return JOIN_16(pIdLo, pIdHi);
+        }
+        else
+        {
+            return BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_VertexID })), mSimdFP32Ty);
         }
+    }
     case StoreInstanceId:
         {
             Value *pId = BITCAST(LOAD(GEP(mpFetchInfo, { 0, SWR_FETCH_CONTEXT_CurInstance })), mFP32Ty);
@@ -2873,7 +1967,6 @@ Value *FetchJit::GenerateCompCtrlVector(const ComponentControl ctrl)
     }
 }
 
-#endif
 //////////////////////////////////////////////////////////////////////////
 /// @brief Returns the enable mask for the specified component.
 /// @param enableMask - enable bits
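
For readers unfamiliar with the helper, the JOIN_16 used throughout this file behaves like a concatenating shuffle of two SIMD8 vectors into one SIMD16 vector; conceptually (a sketch of the semantics, not the Builder's actual implementation):

    // Concatenate two 8-wide halves into one 16-wide vector; lo supplies
    // lanes 0-7 and hi supplies lanes 8-15.
    Value* Join16(Value* lo, Value* hi)
    {
        return VSHUFFLE(lo, hi, C<int32_t>({ 0, 1, 2, 3, 4, 5, 6, 7,
                                             8, 9, 10, 11, 12, 13, 14, 15 }));
    }
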
diff --git a/src/gallium/drivers/swr/rasterizer/jitter/fetch_jit.h b/src/gallium/drivers/swr/rasterizer/jitter/fetch_jit.h
index e334c20..de0ec4f 100644
--- a/src/gallium/drivers/swr/rasterizer/jitter/fetch_jit.h
+++ b/src/gallium/drivers/swr/rasterizer/jitter/fetch_jit.h
@@ -101,7 +101,6 @@ struct FETCH_COMPILE_STATE
     uint32_t cutIndex{ 0xffffffff };
 
     // Options that effect the JIT'd code
-    bool bDisableVGATHER;                   // If enabled, FetchJit will generate loads/shuffles instead of VGATHERs
     bool bDisableIndexOOBCheck;             // If enabled, FetchJit will exclude index OOB check
     bool bEnableCutIndex{ false };          // Compares indices with the cut index and returns a cut mask
     bool bVertexIDOffsetEnable{ false };    // Offset vertexID by StartVertex for non-indexed draws or BaseVertex for indexed draws
@@ -110,14 +109,13 @@ struct FETCH_COMPILE_STATE
     bool bForceSequentialAccessEnable{ false };
     bool bInstanceIDOffsetEnable{ false };
 
-    FETCH_COMPILE_STATE(bool disableVGATHER = false, bool diableIndexOOBCheck = false):
-        bDisableVGATHER(disableVGATHER), bDisableIndexOOBCheck(diableIndexOOBCheck){ };
+    FETCH_COMPILE_STATE(bool disableIndexOOBCheck = false):
+        bDisableIndexOOBCheck(disableIndexOOBCheck){ };
 
     bool operator==(const FETCH_COMPILE_STATE &other) const
     {
         if (numAttribs != other.numAttribs) return false;
         if (indexType != other.indexType) return false;
-        if (bDisableVGATHER != other.bDisableVGATHER) return false;
         if (bDisableIndexOOBCheck != other.bDisableIndexOOBCheck) return false;
         if (bEnableCutIndex != other.bEnableCutIndex) return false;
         if (cutIndex != other.cutIndex) return false;
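
With bDisableVGATHER removed, FETCH_COMPILE_STATE call sites lose the first constructor argument; a hypothetical before/after of constructing the compile state:

    // before: FETCH_COMPILE_STATE state(false /*disableVGATHER*/, false /*disableIndexOOBCheck*/);
    FETCH_COMPILE_STATE state(false /*disableIndexOOBCheck*/);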
-- 
2.7.4
