diff --git a/eng/Subsets.props b/eng/Subsets.props
index d1d57765600006..7dc53d264b7061 100644
--- a/eng/Subsets.props
+++ b/eng/Subsets.props
@@ -219,6 +219,7 @@
+
@@ -322,6 +323,10 @@
   <PropertyGroup Condition="$(_subset.Contains('+clr.jit+'))">
     <ClrRuntimeBuildSubsets>$(ClrRuntimeBuildSubsets);ClrJitSubset=true</ClrRuntimeBuildSubsets>
   </PropertyGroup>

+  <PropertyGroup Condition="$(_subset.Contains('+clr.wasmjit+'))">
+    <ClrRuntimeBuildSubsets>$(ClrRuntimeBuildSubsets);ClrWasmJitSubset=true</ClrRuntimeBuildSubsets>
+  </PropertyGroup>
+
   <PropertyGroup Condition="$(_subset.Contains('+clr.paltests+'))">
     <ClrRuntimeBuildSubsets>$(ClrRuntimeBuildSubsets);ClrPalTestsSubset=true</ClrRuntimeBuildSubsets>
diff --git a/eng/pipelines/runtime.yml b/eng/pipelines/runtime.yml
index 4b8806316afe4d..22fa05aaf2a857 100644
--- a/eng/pipelines/runtime.yml
+++ b/eng/pipelines/runtime.yml
@@ -289,7 +289,7 @@ extends:
- windows_x64
jobParameters:
nameSuffix: Libraries_CheckedCoreCLR
- buildArgs: -s clr+libs -c $(_BuildConfig) -rc Checked
+ buildArgs: -s clr+clr.wasmjit+libs -c $(_BuildConfig) -rc Checked
timeoutInMinutes: 120
postBuildSteps:
- template: /eng/pipelines/coreclr/templates/build-native-test-assets-step.yml
diff --git a/src/coreclr/build-runtime.cmd b/src/coreclr/build-runtime.cmd
index 8b8d5adcb9d938..b88cd359d6dce9 100644
--- a/src/coreclr/build-runtime.cmd
+++ b/src/coreclr/build-runtime.cmd
@@ -329,6 +329,9 @@ for /f "delims=" %%a in ("-%__RequestedBuildComponents%-") do (
if not "!string:-jit-=!"=="!string!" (
set __CMakeTarget=!__CMakeTarget! jit
)
+ if not "!string:-wasmjit-=!"=="!string!" (
+ set __CMakeTarget=!__CMakeTarget! wasmjit
+ )
if not "!string:-alljits-=!"=="!string!" (
set __CMakeTarget=!__CMakeTarget! alljits
)
diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake
index 707359a3b9a0db..c83f4c3762a2ae 100644
--- a/src/coreclr/clrdefinitions.cmake
+++ b/src/coreclr/clrdefinitions.cmake
@@ -277,12 +277,13 @@ function(set_target_definitions_to_custom_os_and_arch)
target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE TARGET_LOONGARCH64)
elseif((TARGETDETAILS_ARCH STREQUAL "arm") OR (TARGETDETAILS_ARCH STREQUAL "armel"))
target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE TARGET_ARM)
- elseif(TARGETDETAILS_ARCH STREQUAL "wasm")
- target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE TARGET_WASM)
elseif((TARGETDETAILS_ARCH STREQUAL "riscv64"))
target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE TARGET_64BIT)
target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE TARGET_RISCV64)
target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE FEATURE_MULTIREG_RETURN)
+ elseif(TARGETDETAILS_ARCH STREQUAL "wasm")
+ target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE TARGET_WASM)
+ target_compile_definitions(${TARGETDETAILS_TARGET} PRIVATE TARGET_WASM32)
endif()
if (TARGETDETAILS_ARCH STREQUAL "armel")
diff --git a/src/coreclr/components.cmake b/src/coreclr/components.cmake
index 8c3a662928d159..9271a713ebe23d 100644
--- a/src/coreclr/components.cmake
+++ b/src/coreclr/components.cmake
@@ -1,5 +1,6 @@
# Define all the individually buildable components of the CoreCLR build and their respective targets
add_component(jit)
+add_component(wasmjit)
add_component(alljits)
add_component(hosts)
add_component(runtime)
diff --git a/src/coreclr/inc/cordebuginfo.h b/src/coreclr/inc/cordebuginfo.h
index b0e813dffd3ab9..66de82eedc165e 100644
--- a/src/coreclr/inc/cordebuginfo.h
+++ b/src/coreclr/inc/cordebuginfo.h
@@ -213,7 +213,7 @@ class ICorDebugInfo
REGNUM_T5,
REGNUM_T6,
REGNUM_PC,
-#elif TARGET_WASM
+#elif defined(TARGET_WASM)
REGNUM_PC, // wasm doesn't have registers
#else
PORTABILITY_WARNING("Register numbers not defined on this platform")
diff --git a/src/coreclr/jit/CMakeLists.txt b/src/coreclr/jit/CMakeLists.txt
index dd83ed26ff862a..1fc3e3506f6476 100644
--- a/src/coreclr/jit/CMakeLists.txt
+++ b/src/coreclr/jit/CMakeLists.txt
@@ -22,7 +22,7 @@ function(create_standalone_jit)
if(TARGETDETAILS_OS STREQUAL "unix_osx" OR TARGETDETAILS_OS STREQUAL "unix_anyos")
set(JIT_ARCH_LINK_LIBRARIES gcinfo_unix_${TARGETDETAILS_ARCH})
- else()
+ elseif(NOT ${TARGETDETAILS_ARCH} MATCHES "wasm")
set(JIT_ARCH_LINK_LIBRARIES gcinfo_${TARGETDETAILS_OS}_${TARGETDETAILS_ARCH})
endif()
@@ -53,6 +53,9 @@ function(create_standalone_jit)
elseif(TARGETDETAILS_ARCH STREQUAL "riscv64")
set(JIT_ARCH_SOURCES ${JIT_RISCV64_SOURCES})
set(JIT_ARCH_HEADERS ${JIT_RISCV64_HEADERS})
+ elseif(TARGETDETAILS_ARCH STREQUAL "wasm")
+ set(JIT_ARCH_SOURCES ${JIT_WASM_SOURCES})
+ set(JIT_ARCH_HEADERS ${JIT_WASM_HEADERS})
else()
clr_unknown_arch()
endif()
@@ -94,7 +97,6 @@ set( JIT_SOURCES
block.cpp
buildstring.cpp
codegencommon.cpp
- codegenlinear.cpp
compiler.cpp
copyprop.cpp
debuginfo.cpp
@@ -115,8 +117,6 @@ set( JIT_SOURCES
fgstmt.cpp
flowgraph.cpp
forwardsub.cpp
- gcdecode.cpp
- gcencode.cpp
gcinfo.cpp
gentree.cpp
gschecks.cpp
@@ -145,8 +145,6 @@ set( JIT_SOURCES
liveness.cpp
loopcloning.cpp
lower.cpp
- lsra.cpp
- lsrabuild.cpp
morph.cpp
morphblock.cpp
objectalloc.cpp
@@ -163,8 +161,6 @@ set( JIT_SOURCES
rangecheckcloning.cpp
rationalize.cpp
redundantbranchopts.cpp
- regalloc.cpp
- regMaskTPOps.cpp
regset.cpp
scev.cpp
scopeinfo.cpp
@@ -178,11 +174,21 @@ set( JIT_SOURCES
stacklevelsetter.cpp
switchrecognition.cpp
treelifeupdater.cpp
- unwind.cpp
utils.cpp
valuenum.cpp
)
+set ( JIT_NATIVE_TARGET_SOURCES
+ lsra.cpp
+ lsrabuild.cpp
+ regalloc.cpp
+ regMaskTPOps.cpp
+ codegenlinear.cpp
+ gcdecode.cpp
+ gcencode.cpp
+ unwind.cpp
+)
+
if (CLR_CMAKE_TARGET_WIN32)
# Append clrjit.natvis file
list (APPEND JIT_SOURCES
@@ -192,6 +198,7 @@ endif(CLR_CMAKE_TARGET_WIN32)
# Define all the architecture-specific source files
set( JIT_AMD64_SOURCES
+ ${JIT_NATIVE_TARGET_SOURCES}
codegenxarch.cpp
emitxarch.cpp
lowerxarch.cpp
@@ -205,6 +212,7 @@ set( JIT_AMD64_SOURCES
)
set( JIT_ARM_SOURCES
+ ${JIT_NATIVE_TARGET_SOURCES}
codegenarmarch.cpp
codegenarm.cpp
decomposelongs.cpp
@@ -217,6 +225,7 @@ set( JIT_ARM_SOURCES
)
set( JIT_I386_SOURCES
+ ${JIT_NATIVE_TARGET_SOURCES}
codegenxarch.cpp
decomposelongs.cpp
emitxarch.cpp
@@ -231,6 +240,7 @@ set( JIT_I386_SOURCES
)
set( JIT_ARM64_SOURCES
+ ${JIT_NATIVE_TARGET_SOURCES}
codegenarmarch.cpp
codegenarm64.cpp
codegenarm64test.cpp
@@ -260,6 +270,7 @@ set( JIT_POWERPC64_SOURCES
)
set( JIT_LOONGARCH64_SOURCES
+ ${JIT_NATIVE_TARGET_SOURCES}
codegenloongarch64.cpp
emitloongarch64.cpp
lowerloongarch64.cpp
@@ -269,6 +280,7 @@ set( JIT_LOONGARCH64_SOURCES
)
set( JIT_RISCV64_SOURCES
+ ${JIT_NATIVE_TARGET_SOURCES}
codegenriscv64.cpp
emitriscv64.cpp
lowerriscv64.cpp
@@ -277,6 +289,14 @@ set( JIT_RISCV64_SOURCES
unwindriscv64.cpp
)
+set( JIT_WASM_SOURCES
+ codegenwasm.cpp
+ emitwasm.cpp
+ lowerwasm.cpp
+ regallocwasm.cpp
+ targetwasm.cpp
+)
+
# We include the headers here for better experience in IDEs.
set( JIT_HEADERS
../inc/corinfo.h
@@ -448,6 +468,15 @@ set( JIT_RISCV64_HEADERS
registerriscv64.h
)
+set( JIT_WASM_HEADERS
+ regallocwasm.h
+ targetwasm.h
+ emitwasm.h
+ emitfmtswasm.h
+ instrswasm.h
+ registerwasm.h
+)
+
convert_to_absolute_path(JIT_SOURCES ${JIT_SOURCES})
convert_to_absolute_path(JIT_HEADERS ${JIT_HEADERS})
convert_to_absolute_path(JIT_RESOURCES ${JIT_RESOURCES})
@@ -470,6 +499,8 @@ convert_to_absolute_path(JIT_LOONGARCH64_SOURCES ${JIT_LOONGARCH64_SOURCES})
convert_to_absolute_path(JIT_LOONGARCH64_HEADERS ${JIT_LOONGARCH64_HEADERS})
convert_to_absolute_path(JIT_RISCV64_SOURCES ${JIT_RISCV64_SOURCES})
convert_to_absolute_path(JIT_RISCV64_HEADERS ${JIT_RISCV64_HEADERS})
+convert_to_absolute_path(JIT_WASM_SOURCES ${JIT_WASM_SOURCES})
+convert_to_absolute_path(JIT_WASM_HEADERS ${JIT_WASM_HEADERS})
if(CLR_CMAKE_TARGET_ARCH_AMD64)
set(JIT_ARCH_SOURCES ${JIT_AMD64_SOURCES})
@@ -499,13 +530,12 @@ elseif(CLR_CMAKE_TARGET_ARCH_RISCV64)
set(JIT_ARCH_SOURCES ${JIT_RISCV64_SOURCES})
set(JIT_ARCH_HEADERS ${JIT_RISCV64_HEADERS})
elseif(CLR_CMAKE_TARGET_ARCH_WASM)
- set(JIT_ARCH_SOURCES ${JIT_WASM32_SOURCES})
- set(JIT_ARCH_HEADERS ${JIT_WASM32_HEADERS})
+ set(JIT_ARCH_SOURCES ${JIT_WASM_SOURCES})
+ set(JIT_ARCH_HEADERS ${JIT_WASM_HEADERS})
else()
clr_unknown_arch()
endif()
-
set(JIT_DLL_MAIN_FILE ${CMAKE_CURRENT_LIST_DIR}/dllmain.cpp)
if(CLR_CMAKE_HOST_WIN32)
@@ -623,7 +653,12 @@ function(add_jit jitName)
target_compile_definitions(${jitName} PRIVATE JIT_STANDALONE_BUILD)
# add the install targets
- install_clr(TARGETS ${jitName} DESTINATIONS . COMPONENT alljits)
+ if (${jitName} MATCHES "clrjit_universal_wasm")
+ # TODO-WASM: add the WASM Jit to alljits once that becomes necessary.
+ install_clr(TARGETS ${jitName} DESTINATIONS . COMPONENT wasmjit)
+ else()
+ install_clr(TARGETS ${jitName} DESTINATIONS . COMPONENT alljits)
+ endif()
endfunction()
set(JIT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
@@ -653,6 +688,7 @@ add_pgo(clrjit)
if (CLR_CMAKE_TARGET_ARCH_ARM64 OR CLR_CMAKE_TARGET_ARCH_AMD64)
create_standalone_jit(TARGET clrjit_universal_arm64_${ARCH_HOST_NAME} OS universal ARCH arm64)
+ create_standalone_jit(TARGET clrjit_universal_wasm_${ARCH_HOST_NAME} OS universal ARCH wasm)
create_standalone_jit(TARGET clrjit_unix_x64_${ARCH_HOST_NAME} OS unix_anyos ARCH x64)
create_standalone_jit(TARGET clrjit_win_x64_${ARCH_HOST_NAME} OS win ARCH x64)
if (CLR_CMAKE_BUILD_COMMUNITY_ALTJITS EQUAL 1)
@@ -678,13 +714,13 @@ if (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX)
endif (CLR_CMAKE_TARGET_ARCH_I386 AND CLR_CMAKE_TARGET_UNIX)
if (CLR_CMAKE_TARGET_UNIX)
- if (NOT ARCH_TARGET_NAME STREQUAL s390x AND NOT ARCH_TARGET_NAME STREQUAL armv6 AND NOT ARCH_TARGET_NAME STREQUAL ppc64le AND NOT ARCH_TARGET_NAME STREQUAL riscv64)
+ if (NOT ARCH_TARGET_NAME STREQUAL s390x AND NOT ARCH_TARGET_NAME STREQUAL armv6 AND NOT ARCH_TARGET_NAME STREQUAL ppc64le AND NOT ARCH_TARGET_NAME STREQUAL riscv64 AND NOT ARCH_TARGET_NAME STREQUAL wasm)
if(CLR_CMAKE_TARGET_ARCH_ARM OR CLR_CMAKE_TARGET_ARCH_ARM64)
install_clr(TARGETS clrjit_universal_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME} DESTINATIONS . COMPONENT jit)
else()
install_clr(TARGETS clrjit_unix_${ARCH_TARGET_NAME}_${ARCH_HOST_NAME} DESTINATIONS . COMPONENT jit)
endif()
- endif(NOT ARCH_TARGET_NAME STREQUAL s390x AND NOT ARCH_TARGET_NAME STREQUAL armv6 AND NOT ARCH_TARGET_NAME STREQUAL ppc64le AND NOT ARCH_TARGET_NAME STREQUAL riscv64)
+ endif()
endif()
if (CLR_CMAKE_TARGET_WIN32 AND CLR_CMAKE_PGO_INSTRUMENT)
diff --git a/src/coreclr/jit/abi.cpp b/src/coreclr/jit/abi.cpp
index 21e20fd00820c1..da25788f30d63c 100644
--- a/src/coreclr/jit/abi.cpp
+++ b/src/coreclr/jit/abi.cpp
@@ -41,6 +41,7 @@ regNumber ABIPassingSegment::GetRegister() const
return static_cast<regNumber>(m_register);
}
+#if HAS_FIXED_REGISTER_SET
//-----------------------------------------------------------------------------
// GetRegisterMask:
// Get the mask of registers that this segment is passed in.
@@ -62,6 +63,7 @@ regMaskTP ABIPassingSegment::GetRegisterMask() const
return mask;
}
+#endif // HAS_FIXED_REGISTER_SET
//-----------------------------------------------------------------------------
// GetStackOffset:
diff --git a/src/coreclr/jit/abi.h b/src/coreclr/jit/abi.h
index dcb23cfd37470c..7b90962bcff28c 100644
--- a/src/coreclr/jit/abi.h
+++ b/src/coreclr/jit/abi.h
@@ -331,6 +331,22 @@ class LoongArch64Classifier
WellKnownArg wellKnownParam);
};
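+// WasmClassifier: ABI argument classifier for the WASM target. It reports a
+// zero-sized outgoing stack argument area (StackSize always returns 0).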
+class WasmClassifier
+{
+public:
+ WasmClassifier(const ClassifierInfo& info);
+
+ unsigned StackSize()
+ {
+ return 0;
+ }
+
+ ABIPassingInformation Classify(Compiler* comp,
+ var_types type,
+ ClassLayout* structLayout,
+ WellKnownArg wellKnownParam);
+};
+
#if defined(TARGET_X86)
typedef X86Classifier PlatformClassifier;
#elif defined(WINDOWS_AMD64_ABI)
@@ -345,6 +361,8 @@ typedef Arm32Classifier PlatformClassifier;
typedef RiscV64Classifier PlatformClassifier;
#elif defined(TARGET_LOONGARCH64)
typedef LoongArch64Classifier PlatformClassifier;
+#elif defined(TARGET_WASM)
+typedef WasmClassifier PlatformClassifier;
#endif
#ifdef SWIFT_SUPPORT
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index 78457cf6255e37..7d0f7b3b5f40b0 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -23,7 +23,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "gcinfo.h"
#include "emit.h"
-#ifndef JIT32_GCENCODER
+#if EMIT_GENERATE_GCINFO && !defined(JIT32_GCENCODER)
#include "gcinfoencoder.h"
#endif
@@ -58,12 +58,17 @@ void CodeGenInterface::setFramePointerRequiredEH(bool value)
#endif // JIT32_GCENCODER
}
-/*****************************************************************************/
CodeGenInterface* getCodeGenerator(Compiler* comp)
{
return new (comp, CMK_Codegen) CodeGen(comp);
}
+// TODO-WASM-Factoring: this ifdef factoring is temporary. The end factoring should look like this:
+// 1. Everything shared by all codegen backends (incl. WASM) stays here.
+// 2. Everything else goes into codegenlinear.cpp.
+// 3. codegenlinear.cpp gets renamed to codegennative.cpp.
+//
+#ifndef TARGET_WASM
NodeInternalRegisters::NodeInternalRegisters(Compiler* comp)
: m_table(comp->getAllocator(CMK_LSRA))
{
@@ -170,16 +175,6 @@ unsigned NodeInternalRegisters::Count(GenTree* tree, regMaskTP mask)
return m_table.Lookup(tree, &regs) ? genCountBits(regs & mask) : 0;
}
-// CodeGen constructor
-CodeGenInterface::CodeGenInterface(Compiler* theCompiler)
- : gcInfo(theCompiler)
- , regSet(theCompiler, gcInfo)
- , internalRegisters(theCompiler)
- , compiler(theCompiler)
- , treeLifeUpdater(nullptr)
-{
-}
-
#if defined(TARGET_XARCH)
void CodeGenInterface::CopyRegisterInfo()
{
@@ -195,8 +190,19 @@ void CodeGenInterface::CopyRegisterInfo()
rbmMskCalleeTrash = compiler->rbmMskCalleeTrash;
}
#endif // TARGET_XARCH
+#endif // !TARGET_WASM
-/*****************************************************************************/
+// CodeGen constructor
+CodeGenInterface::CodeGenInterface(Compiler* theCompiler)
+ : gcInfo(theCompiler)
+ , regSet(theCompiler, gcInfo)
+#if HAS_FIXED_REGISTER_SET
+ , internalRegisters(theCompiler)
+#endif // HAS_FIXED_REGISTER_SET
+ , compiler(theCompiler)
+ , treeLifeUpdater(nullptr)
+{
+}
CodeGen::CodeGen(Compiler* theCompiler)
: CodeGenInterface(theCompiler)
@@ -262,6 +268,7 @@ CodeGen::CodeGen(Compiler* theCompiler)
#endif // TARGET_ARM64
}
+#ifndef TARGET_WASM
#if defined(TARGET_X86) || defined(TARGET_ARM)
//---------------------------------------------------------------------
@@ -654,6 +661,7 @@ bool CodeGen::genIsSameLocalVar(GenTree* op1, GenTree* op2)
return op1Skip->OperIs(GT_LCL_VAR) && op2Skip->OperIs(GT_LCL_VAR) &&
(op1Skip->AsLclVar()->GetLclNum() == op2Skip->AsLclVar()->GetLclNum());
}
+#endif // !TARGET_WASM
// The given lclVar is either going live (being born) or dying.
// It might be both going live and dying (that is, it is a dead store) under MinOpts.
@@ -661,6 +669,7 @@ bool CodeGen::genIsSameLocalVar(GenTree* op1, GenTree* op2)
// inline
void CodeGenInterface::genUpdateRegLife(const LclVarDsc* varDsc, bool isBorn, bool isDying DEBUGARG(GenTree* tree))
{
+#if EMIT_GENERATE_GCINFO // The regset being updated here is only needed for codegen-level GCness tracking
regMaskTP regMask = genGetRegMask(varDsc);
#ifdef DEBUG
@@ -690,8 +699,10 @@ void CodeGenInterface::genUpdateRegLife(const LclVarDsc* varDsc, bool isBorn, bo
assert(varDsc->IsAlwaysAliveInMemory() || ((regSet.GetMaskVars() & regMask) == 0));
regSet.AddMaskVars(regMask);
}
+#endif // EMIT_GENERATE_GCINFO
}
+#ifndef TARGET_WASM
//----------------------------------------------------------------------
// compHelperCallKillSet: Gets a register mask that represents the kill set for a helper call.
// Not all JIT Helper calls follow the standard ABI on the target architecture.
@@ -1084,6 +1095,7 @@ void CodeGen::genAdjustStackLevel(BasicBlock* block)
}
#endif // !FEATURE_FIXED_OUT_ARGS
}
+#endif // !TARGET_WASM
//------------------------------------------------------------------------
// genCreateAddrMode:
@@ -1525,6 +1537,7 @@ bool CodeGen::genCreateAddrMode(GenTree* addr,
return true;
}
+#ifndef TARGET_WASM
//------------------------------------------------------------------------
// genEmitCallWithCurrentGC:
// Emit a call with GC information captured from current GC information.
@@ -5835,6 +5848,7 @@ XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
+#endif // !TARGET_WASM
//-----------------------------------------------------------------------------------
// IsMultiRegReturnedType: Returns true if the type is returned in multiple registers
@@ -5918,6 +5932,7 @@ unsigned Compiler::GetHfaCount(CORINFO_CLASS_HANDLE hClass)
#endif // TARGET_ARM64
}
+#ifndef TARGET_WASM
//------------------------------------------------------------------------------------------------ //
// getFirstArgWithStackSlot - returns the first argument with stack slot on the caller's frame.
//
@@ -6117,6 +6132,7 @@ void CodeGen::genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefReg
#endif // FEATURE_FIXED_OUT_ARGS
}
+#endif // !TARGET_WASM
#ifdef DEBUG
@@ -6276,6 +6292,7 @@ void CodeGen::genIPmappingAddToFront(IPmappingDscKind kind, const DebugInfo& di,
#endif // DEBUG
}
+#ifndef TARGET_WASM
/*****************************************************************************/
void CodeGen::genEnsureCodeEmitted(const DebugInfo& di)
@@ -7867,12 +7884,14 @@ void CodeGen::genStackPointerCheck(bool doStackPointerCheck,
}
#endif // defined(DEBUG) && defined(TARGET_XARCH)
+#endif // !TARGET_WASM
unsigned CodeGenInterface::getCurrentStackLevel() const
{
return genStackLevel;
}
+#ifndef TARGET_WASM
//-----------------------------------------------------------------------------
// genPoisonFrame: Generate code that places a recognizable value into address exposed variables.
//
@@ -8115,3 +8134,4 @@ void CodeGen::genCodeForSwiftErrorReg(GenTree* tree)
genProduceReg(tree);
}
#endif // SWIFT_SUPPORT
+#endif // !TARGET_WASM
diff --git a/src/coreclr/jit/codegeninterface.h b/src/coreclr/jit/codegeninterface.h
index 5b5219f06d97d8..7dda0e0630478b 100644
--- a/src/coreclr/jit/codegeninterface.h
+++ b/src/coreclr/jit/codegeninterface.h
@@ -37,8 +37,6 @@ class emitter;
struct RegState
{
regMaskTP rsCalleeRegArgMaskLiveIn; // mask of register arguments (live on entry to method)
- unsigned rsCalleeRegArgCount; // total number of incoming register arguments of this kind (int or float)
- bool rsIsFloat; // true for float argument registers, false for integer argument registers
};
//-------------------- CodeGenInterface ---------------------------------
@@ -155,12 +153,14 @@ class CodeGenInterface
unsigned* mulPtr,
ssize_t* cnsPtr) = 0;
- GCInfo gcInfo;
+ GCInfo gcInfo;
+ RegSet regSet;
+ RegState intRegState;
+ RegState floatRegState;
- RegSet regSet;
- RegState intRegState;
- RegState floatRegState;
+#if HAS_FIXED_REGISTER_SET
NodeInternalRegisters internalRegisters;
+#endif
protected:
Compiler* compiler;
@@ -169,7 +169,8 @@ class CodeGenInterface
private:
#if defined(TARGET_XARCH)
static const insFlags instInfo[INS_count];
-#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) || \
+ defined(TARGET_WASM)
static const BYTE instInfo[INS_count];
#else
#error Unsupported target architecture
diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp
index 339ac1eab1610d..5f9e9390527645 100644
--- a/src/coreclr/jit/codegenlinear.cpp
+++ b/src/coreclr/jit/codegenlinear.cpp
@@ -1072,9 +1072,6 @@ void CodeGen::genUnspillLocal(
// due to issues with LSRA resolution moves.
// So, just force it for now. This probably indicates a condition that creates a GC hole!
//
- // Extra note: I think we really want to call something like gcInfo.gcUpdateForRegVarMove,
- // because the variable is not really going live or dead, but that method is somewhat poorly
- // factored because it, in turn, updates rsMaskVars which is part of RegSet not GCInfo.
// TODO-Cleanup: This code exists in other CodeGen*.cpp files, and should be moved to CodeGenCommon.cpp.
// Don't update the variable's location if we are just re-spilling it again.
diff --git a/src/coreclr/jit/codegenwasm.cpp b/src/coreclr/jit/codegenwasm.cpp
new file mode 100644
index 00000000000000..43e19ec5b04de0
--- /dev/null
+++ b/src/coreclr/jit/codegenwasm.cpp
@@ -0,0 +1,117 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include "jitpch.h"
+#ifdef _MSC_VER
+#pragma hdrstop
+#endif
+
+#include "codegen.h"
+
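+// Most entry points in this file are still NYI_WASM stubs; see the
+// TODO-WASM-Factoring notes in codegencommon.cpp for how the codegen
+// sources are planned to be split between native targets and WASM.
+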
+void CodeGen::genGenerateCode(void** codePtr, uint32_t* nativeSizeOfCode)
+{
+ NYI_WASM("Undef genGenerateCode in codegencommon.cpp and proceed from there");
+}
+
+void CodeGen::genSpillVar(GenTree* tree)
+{
+ NYI_WASM("Put all spilling to memory under '#if HAS_FIXED_REGISTER_SET'");
+}
+
+void CodeGen::genFnEpilog(BasicBlock* block)
+{
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("*************** In genFnEpilog()\n");
+ }
+#endif // DEBUG
+
+ NYI_WASM("genFnEpilog");
+}
+
+void CodeGen::genFuncletProlog(BasicBlock* block)
+{
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("*************** In genFuncletProlog()\n");
+ }
+#endif
+
+ NYI_WASM("genFuncletProlog");
+}
+
+void CodeGen::genFuncletEpilog()
+{
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("*************** In genFuncletEpilog()\n");
+ }
+#endif
+
+ NYI_WASM("genFuncletEpilog");
+}
+
+//---------------------------------------------------------------------
+// genTotalFrameSize - return the total size of the linear memory stack frame.
+//
+// Return value:
+// Total linear memory frame size
+//
+int CodeGenInterface::genTotalFrameSize() const
+{
+ assert(compiler->compLclFrameSize >= 0);
+ return compiler->compLclFrameSize;
+}
+
+//---------------------------------------------------------------------
+// genSPtoFPdelta - return the offset from SP to the frame pointer.
+// This number is going to be positive, since SP must be at the lowest
+// address.
+//
+// There must be a frame pointer to call this function!
+int CodeGenInterface::genSPtoFPdelta() const
+{
+ assert(isFramePointerUsed());
+ NYI_WASM("genSPtoFPdelta");
+ return 0;
+}
+
+//---------------------------------------------------------------------
+// genCallerSPtoFPdelta - return the offset from Caller-SP to the frame pointer.
+// This number is going to be negative, since the Caller-SP is at a higher
+// address than the frame pointer.
+//
+// There must be a frame pointer to call this function!
+int CodeGenInterface::genCallerSPtoFPdelta() const
+{
+ assert(isFramePointerUsed());
+ NYI_WASM("genCallerSPtoFPdelta");
+ return 0;
+}
+
+//---------------------------------------------------------------------
+// genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP.
+//
+// This number will be negative.
+int CodeGenInterface::genCallerSPtoInitialSPdelta() const
+{
+ NYI_WASM("genCallerSPtoInitialSPdelta");
+ return 0;
+}
+
+void CodeGenInterface::genUpdateVarReg(LclVarDsc* varDsc, GenTree* tree, int regIndex)
+{
+ NYI_WASM("Move genUpdateVarReg from codegenlinear.cpp to codegencommon.cpp shared code");
+}
+
+void CodeGenInterface::genUpdateVarReg(LclVarDsc* varDsc, GenTree* tree)
+{
+ NYI_WASM("Move genUpdateVarReg from codegenlinear.cpp to codegencommon.cpp shared code");
+}
+
+void RegSet::verifyRegUsed(regNumber reg)
+{
+}
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index f5c618fccea289..19a720cfc9540d 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -1836,41 +1836,42 @@ const char* Compiler::compRegVarName(regNumber reg, bool displayVar, bool isFloa
const char* Compiler::compRegNameForSize(regNumber reg, size_t size)
{
- if (size == 0 || size >= 4)
- {
- return compRegVarName(reg, true);
- }
+#if CPU_HAS_BYTE_REGS
+ if (size == 1 || size == 2)
+ {
+ // clang-format off
+ static const char* sizeNames[][2] =
+ {
+ { "al", "ax" },
+ { "cl", "cx" },
+ { "dl", "dx" },
+ { "bl", "bx" },
+ #ifdef TARGET_AMD64
+ { "spl", "sp" }, // ESP
+ { "bpl", "bp" }, // EBP
+ { "sil", "si" }, // ESI
+ { "dil", "di" }, // EDI
+ { "r8b", "r8w" },
+ { "r9b", "r9w" },
+ { "r10b", "r10w" },
+ { "r11b", "r11w" },
+ { "r12b", "r12w" },
+ { "r13b", "r13w" },
+ { "r14b", "r14w" },
+ { "r15b", "r15w" },
+ #endif // TARGET_AMD64
+ };
+ // clang-format on
- // clang-format off
- static
- const char * sizeNames[][2] =
- {
- { "al", "ax" },
- { "cl", "cx" },
- { "dl", "dx" },
- { "bl", "bx" },
-#ifdef TARGET_AMD64
- { "spl", "sp" }, // ESP
- { "bpl", "bp" }, // EBP
- { "sil", "si" }, // ESI
- { "dil", "di" }, // EDI
- { "r8b", "r8w" },
- { "r9b", "r9w" },
- { "r10b", "r10w" },
- { "r11b", "r11w" },
- { "r12b", "r12w" },
- { "r13b", "r13w" },
- { "r14b", "r14w" },
- { "r15b", "r15w" },
-#endif // TARGET_AMD64
- };
- // clang-format on
+ assert(isByteReg(reg));
+ assert(genRegMask(reg) & RBM_BYTE_REGS);
+ assert(size == 1 || size == 2);
- assert(isByteReg(reg));
- assert(genRegMask(reg) & RBM_BYTE_REGS);
- assert(size == 1 || size == 2);
+ return sizeNames[reg][size - 1];
+ }
+#endif // CPU_HAS_BYTE_REGS
- return sizeNames[reg][size - 1];
+ return compRegVarName(reg, true);
}
#ifdef DEBUG
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index e7f3dae76f4102..71d1b90b133710 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -886,6 +886,7 @@ class LclVarDsc
return lvIsRegCandidate() && (GetRegNum() != REG_STK);
}
+#if HAS_FIXED_REGISTER_SET
regMaskTP lvRegMask() const
{
if (GetRegNum() != REG_STK)
@@ -913,6 +914,7 @@ class LclVarDsc
return RBM_NONE;
}
}
+#endif // HAS_FIXED_REGISTER_SET
//-----------------------------------------------------------------------------
// AllFieldDeathFlags: Get a bitset of flags that represents all fields dying.
@@ -8411,6 +8413,9 @@ class Compiler
#elif defined(TARGET_RISCV64)
reg = REG_T5;
regMask = RBM_T5;
+#elif defined(TARGET_WASM)
+ reg = REG_NA;
+ regMask = RBM_NONE;
#else
#error Unsupported or unset target architecture
#endif
@@ -12405,6 +12410,10 @@ const instruction INS_SQRT = INS_fsqrt_d; // NOTE: default is double.
const instruction INS_BREAKPOINT = INS_ebreak;
#endif // TARGET_RISCV64
+#ifdef TARGET_WASM
+const instruction INS_BREAKPOINT = INS_unreachable;
+#endif
+
/*****************************************************************************/
extern const BYTE genTypeSizes[];
diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp
index 12280cde617228..02bfd82ce6555d 100644
--- a/src/coreclr/jit/compiler.hpp
+++ b/src/coreclr/jit/compiler.hpp
@@ -918,6 +918,7 @@ inline unsigned Compiler::funGetFuncIdx(BasicBlock* block)
}
}
+#if HAS_FIXED_REGISTER_SET
//------------------------------------------------------------------------------
// genRegNumFromMask : Maps a single register mask to a register number.
//
@@ -1131,6 +1132,7 @@ inline regNumber genFirstRegNumFromMaskAndToggle(SingleTypeRegSet& mask)
return regNum;
}
+#endif // HAS_FIXED_REGISTER_SET
/*****************************************************************************
*
@@ -3570,6 +3572,7 @@ inline int getJitStressLevel()
#endif // DEBUG
+#if HAS_FIXED_REGISTER_SET
/*****************************************************************************/
/* Map a register argument number ("RegArgNum") to a register number ("RegNum").
* A RegArgNum is in this range:
@@ -3777,6 +3780,7 @@ inline unsigned genMapRegNumToRegArgNum(regNumber regNum, var_types type, CorInf
return genMapIntRegNumToRegArgNum(regNum, callConv);
}
}
+#endif // HAS_FIXED_REGISTER_SET
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp
index 2aa394f5560bed..fbe3afe18d7995 100644
--- a/src/coreclr/jit/emit.cpp
+++ b/src/coreclr/jit/emit.cpp
@@ -3463,6 +3463,7 @@ const char* emitter::emitGetFrameReg()
void emitter::emitDispRegSet(regMaskTP regs)
{
+#if HAS_FIXED_REGISTER_SET
regNumber reg;
bool sp = false;
@@ -3496,6 +3497,7 @@ void emitter::emitDispRegSet(regMaskTP regs)
}
printf("}");
+#endif // HAS_FIXED_REGISTER_SET
}
/*****************************************************************************
@@ -3834,6 +3836,9 @@ const size_t hexEncodingSize = 19;
#elif defined(TARGET_RISCV64)
const size_t basicIndent = 12;
const size_t hexEncodingSize = 19;
+#elif defined(TARGET_WASM)
+const size_t basicIndent = 12;
+const size_t hexEncodingSize = 19; // 8 bytes (wasm-objdump default) + 1 space.
#endif
#ifdef DEBUG
@@ -5505,6 +5510,9 @@ void emitter::emitJumpDistBind()
assert((sizeDif == 4) || (sizeDif == 8));
#elif defined(TARGET_RISCV64)
assert((sizeDif == 0) || (sizeDif == 4) || (sizeDif == 8));
+#elif defined(TARGET_WASM)
+ // TODO-WASM: likely the whole thing needs to be made unreachable.
+ NYI_WASM("emitJumpDistBind");
#else
#error Unsupported or unset target architecture
#endif
@@ -7685,6 +7693,8 @@ unsigned emitter::emitEndCodeGen(Compiler* comp,
#elif defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
// For LoongArch64 and RiscV64 `emitFwdJumps` is always false.
unreached();
+#elif defined(TARGET_WASM)
+ NYI_WASM("Short jump distance adjustment");
#else
#error Unsupported or unset target architecture
#endif
@@ -7700,6 +7710,8 @@ unsigned emitter::emitEndCodeGen(Compiler* comp,
#elif defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
// For LoongArch64 and RiscV64 `emitFwdJumps` is always false.
unreached();
+#elif defined(TARGET_WASM)
+ NYI_WASM("Jump distance adjustment");
#else
#error Unsupported or unset target architecture
#endif
@@ -9109,6 +9121,7 @@ void emitter::emitUpdateLiveGCregs(GCtype gcType, regMaskTP regs, BYTE* addr)
return;
}
+#if EMIT_GENERATE_GCINFO
regMaskTP life;
regMaskTP dead;
regMaskTP chg;
@@ -9163,6 +9176,7 @@ void emitter::emitUpdateLiveGCregs(GCtype gcType, regMaskTP regs, BYTE* addr)
// The 2 GC reg masks can't be overlapping
assert((emitThisGCrefRegs & emitThisByrefRegs) == 0);
+#endif // EMIT_GENERATE_GCINFO
}
/*****************************************************************************
@@ -9430,6 +9444,7 @@ void emitter::emitGCregLiveUpd(GCtype gcType, regNumber reg, BYTE* addr)
{
assert(emitIssuing);
+#if EMIT_GENERATE_GCINFO
// Don't track GC changes in epilogs
if (emitIGisInEpilog(emitCurIG))
{
@@ -9471,6 +9486,7 @@ void emitter::emitGCregLiveUpd(GCtype gcType, regNumber reg, BYTE* addr)
// The 2 GC reg masks can't be overlapping
assert((emitThisGCrefRegs & emitThisByrefRegs) == 0);
+#endif // EMIT_GENERATE_GCINFO
}
/*****************************************************************************
@@ -9488,6 +9504,7 @@ void emitter::emitGCregDeadUpdMask(regMaskTP regs, BYTE* addr)
return;
}
+#if EMIT_GENERATE_GCINFO
// First, handle the gcref regs going dead
regMaskTP gcrefRegs = emitThisGCrefRegs & regs;
@@ -9523,6 +9540,7 @@ void emitter::emitGCregDeadUpdMask(regMaskTP regs, BYTE* addr)
emitThisByrefRegs &= ~byrefRegs;
}
+#endif // EMIT_GENERATE_GCINFO
}
/*****************************************************************************
@@ -9534,6 +9552,7 @@ void emitter::emitGCregDeadUpd(regNumber reg, BYTE* addr)
{
assert(emitIssuing);
+#if EMIT_GENERATE_GCINFO
// Don't track GC changes in epilogs
if (emitIGisInEpilog(emitCurIG))
{
@@ -9562,6 +9581,7 @@ void emitter::emitGCregDeadUpd(regNumber reg, BYTE* addr)
emitThisByrefRegs &= ~regMask;
}
+#endif // EMIT_GENERATE_GCINFO
}
/*****************************************************************************
@@ -10534,6 +10554,7 @@ const char* emitter::emitOffsetToLabel(unsigned offs)
#endif // DEBUG
+#if HAS_FIXED_REGISTER_SET
//------------------------------------------------------------------------
// emitGetGCRegsSavedOrModified: Returns the set of registers that keeps gcrefs and byrefs across the call.
//
@@ -10649,6 +10670,7 @@ regMaskTP emitter::emitGetGCRegsKilledByNoGCCall(CorInfoHelpFunc helper)
return result;
}
+#endif // HAS_FIXED_REGISTER_SET
//------------------------------------------------------------------------
// emitDisableGC: Requests that the following instruction groups are not GC-interruptible.
diff --git a/src/coreclr/jit/emit.h b/src/coreclr/jit/emit.h
index 0b14c76ad2e969..aa9b50eeb07bf1 100644
--- a/src/coreclr/jit/emit.h
+++ b/src/coreclr/jit/emit.h
@@ -758,6 +758,7 @@ class emitter
// arm64: 21 bits
// loongarch64: 14 bits
// risc-v: 14 bits
+ // wasm: 16 bits (TODO-WASM-TP: shrink the format field)
private:
#if defined(TARGET_XARCH)
@@ -806,6 +807,7 @@ class emitter
// arm64: 46 bits
// loongarch64: 28 bits
// risc-v: 28 bits
+ // wasm: 20 bits (TODO-WASM-TP: remove the reg fields)
unsigned _idSmallDsc : 1; // is this a "small" descriptor?
unsigned _idLargeCns : 1; // does a large constant follow? (or if large call descriptor used)
@@ -898,6 +900,7 @@ class emitter
// arm64: 55 bits
// loongarch64: 46 bits
// risc-v: 46 bits
+ // wasm: 28 bits
//
// How many bits have been used beyond the first 32?
@@ -914,6 +917,8 @@ class emitter
#define ID_EXTRA_BITFIELD_BITS (18)
#elif defined(TARGET_AMD64)
#define ID_EXTRA_BITFIELD_BITS (20)
+#elif defined(TARGET_WASM)
+#define ID_EXTRA_BITFIELD_BITS (-4)
#else
#error Unsupported or unset target architecture
#endif
@@ -953,12 +958,17 @@ class emitter
// arm64: 62/57 bits
// loongarch64: 53/48 bits
// risc-v: 53/48 bits
+ // wasm: 35/30 bits
#define ID_EXTRA_BITS (ID_EXTRA_RELOC_BITS + ID_EXTRA_BITFIELD_BITS + ID_EXTRA_PREV_OFFSET_BITS)
/* Use whatever bits are left over for small constants */
+#if ID_EXTRA_BITS <= 0
+#define ID_BIT_SMALL_CNS 30 // Not 32 or 31 here to avoid breaking the math below.
+#else
#define ID_BIT_SMALL_CNS (32 - ID_EXTRA_BITS)
+#endif
////////////////////////////////////////////////////////////////////////
// Small constant size (with/without prev offset, assuming host==target):
@@ -968,6 +978,7 @@ class emitter
// arm64: 2/7 bits
// loongarch64: 11/16 bits
// risc-v: 11/16 bits
+ // wasm: 32 bits
#define ID_ADJ_SMALL_CNS (int)(1 << (ID_BIT_SMALL_CNS - 1))
#define ID_CNT_SMALL_CNS (int)(1 << ID_BIT_SMALL_CNS)
@@ -1252,6 +1263,13 @@ class emitter
assert(sz <= 32);
_idCodeSize = sz;
}
+#elif defined(TARGET_WASM)
+ unsigned idCodeSize() const
+ {
+ // TODO-WASM: return an accurate number here based on instruction format/opcode like ARM64 above.
+ NYI_WASM("idCodeSize");
+ return 0;
+ }
#endif
emitAttr idOpSize() const
@@ -2138,7 +2156,28 @@ class emitter
#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_1C
#define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_4C
-#endif // TARGET_XXX
+#elif defined(TARGET_WASM)
+// Latencies for an "average" physical target.
+//
+// a read,write or modify from stack location, possible def to use latency from L0 cache
+#define PERFSCORE_LATENCY_RD_STACK PERFSCORE_LATENCY_3C
+#define PERFSCORE_LATENCY_WR_STACK PERFSCORE_LATENCY_1C
+#define PERFSCORE_LATENCY_RD_WR_STACK PERFSCORE_LATENCY_3C
+
+// a read, write or modify from constant location, possible def to use latency from L0 cache
+#define PERFSCORE_LATENCY_RD_CONST_ADDR PERFSCORE_LATENCY_3C
+#define PERFSCORE_LATENCY_WR_CONST_ADDR PERFSCORE_LATENCY_1C
+#define PERFSCORE_LATENCY_RD_WR_CONST_ADDR PERFSCORE_LATENCY_3C
+
+// a read, write or modify from memory location, possible def to use latency from L0 or L1 cache
+// plus an extra cost (of 1.0) for a increased chance of a cache miss
+#define PERFSCORE_LATENCY_RD_GENERAL PERFSCORE_LATENCY_4C
+#define PERFSCORE_LATENCY_WR_GENERAL PERFSCORE_LATENCY_1C
+#define PERFSCORE_LATENCY_RD_WR_GENERAL PERFSCORE_LATENCY_4C
+
+#else
+#error Unknown TARGET
+#endif
// Make this an enum:
//
@@ -4309,6 +4348,7 @@ emitAttr emitter::emitGetMemOpSize(instrDesc* id, bool ignoreEmbeddedBroadcast)
#endif // TARGET_XARCH
+#if HAS_FIXED_REGISTER_SET
/*****************************************************************************
*
* Returns true if the given register contains a live GC ref.
@@ -4331,6 +4371,7 @@ inline GCtype emitter::emitRegGCtype(regNumber reg)
return GCT_NONE;
}
}
+#endif // HAS_FIXED_REGISTER_SET
#ifdef DEBUG
diff --git a/src/coreclr/jit/emitdef.h b/src/coreclr/jit/emitdef.h
index 1d261919e7e51d..ac4be9674b7dd3 100644
--- a/src/coreclr/jit/emitdef.h
+++ b/src/coreclr/jit/emitdef.h
@@ -16,6 +16,8 @@
#include "emitloongarch64.h"
#elif defined(TARGET_RISCV64)
#include "emitriscv64.h"
+#elif defined(TARGET_WASM)
+#include "emitwasm.h"
#else
#error Unsupported or unset target architecture
#endif
diff --git a/src/coreclr/jit/emitfmts.h b/src/coreclr/jit/emitfmts.h
index 883b4458d94290..313b91deba35fc 100644
--- a/src/coreclr/jit/emitfmts.h
+++ b/src/coreclr/jit/emitfmts.h
@@ -12,6 +12,8 @@
#include "emitfmtsloongarch64.h"
#elif defined(TARGET_RISCV64)
#include "emitfmtsriscv64.h"
+#elif defined(TARGET_WASM)
+#include "emitfmtswasm.h"
#else
#error Unsupported or unset target architecture
#endif // target type
diff --git a/src/coreclr/jit/emitfmtswasm.h b/src/coreclr/jit/emitfmtswasm.h
new file mode 100644
index 00000000000000..5e709355c555eb
--- /dev/null
+++ b/src/coreclr/jit/emitfmtswasm.h
@@ -0,0 +1,33 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+// clang-format off
+#if !defined(TARGET_WASM)
+#error Unexpected target type
+#endif
+
+#ifdef DEFINE_ID_OPS
+enum ID_OPS
+{
+ ID_OP_NONE,
+};
+#undef DEFINE_ID_OPS
+
+#else // !DEFINE_ID_OPS
+
+#ifndef IF_DEF
+#error Must define IF_DEF macro before including this file
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// enum insFormat instruction enum ID_OPS
+// scheduling
+// (unused)
+//////////////////////////////////////////////////////////////////////////////
+
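+// Only a single placeholder instruction format is defined for now.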
+IF_DEF(NONE, IS_NONE, NONE)
+
+#undef IF_DEF
+#endif // !DEFINE_ID_OPS
+// clang-format on
diff --git a/src/coreclr/jit/emitinl.h b/src/coreclr/jit/emitinl.h
index f0e0f053fbf8be..88f88713e58b39 100644
--- a/src/coreclr/jit/emitinl.h
+++ b/src/coreclr/jit/emitinl.h
@@ -229,6 +229,7 @@ inline ssize_t emitter::emitGetInsAmdAny(const instrDesc* id) const
*/
/*static*/ inline void emitter::emitEncodeCallGCregs(regMaskTP regmask, instrDesc* id)
{
+#if HAS_FIXED_REGISTER_SET
unsigned encodeMask;
#ifdef TARGET_X86
@@ -415,8 +416,10 @@ inline ssize_t emitter::emitGetInsAmdAny(const instrDesc* id) const
#else
NYI("unknown target");
#endif
+#endif // HAS_FIXED_REGISTER_SET
}
+#if HAS_FIXED_REGISTER_SET
/*static*/ inline unsigned emitter::emitDecodeCallGCregs(instrDesc* id)
{
regMaskTP regmask = RBM_NONE;
@@ -586,6 +589,7 @@ inline ssize_t emitter::emitGetInsAmdAny(const instrDesc* id) const
return (unsigned int)regmask.getLow();
}
+#endif // HAS_FIXED_REGISTER_SET
#ifdef TARGET_XARCH
inline bool insIsCMOV(instruction ins)
diff --git a/src/coreclr/jit/emitjmps.h b/src/coreclr/jit/emitjmps.h
index 6c9861c91a1b17..d012fb4467a625 100644
--- a/src/coreclr/jit/emitjmps.h
+++ b/src/coreclr/jit/emitjmps.h
@@ -59,6 +59,8 @@ JMP_SMALL(jmp , jmp , j )
JMP_SMALL(eq , ne , beq ) // EQ
JMP_SMALL(ne , eq , bne ) // NE
+#elif defined(TARGET_WASM) // No jump kinds needed for WASM.
+
#else
#error Unsupported or unset target architecture
#endif // target type
diff --git a/src/coreclr/jit/emitwasm.cpp b/src/coreclr/jit/emitwasm.cpp
new file mode 100644
index 00000000000000..90a2b44574b6d8
--- /dev/null
+++ b/src/coreclr/jit/emitwasm.cpp
@@ -0,0 +1,94 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include "jitpch.h"
+#ifdef _MSC_VER
+#pragma hdrstop
+#endif
+
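+// Per-instruction info table for the WASM instruction set, generated from the
+// "info" column of instrswasm.h; the matching declaration is in codegeninterface.h.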
+// clang-format off
+/*static*/ const BYTE CodeGenInterface::instInfo[] =
+{
+ #define INST(id, nm, info, opcode) info,
+ #include "instrs.h"
+};
+// clang-format on
+
+void emitter::emitIns(instruction ins)
+{
+ NYI_WASM("emitIns");
+}
+
+void emitter::emitIns_I(instruction ins, emitAttr attr, ssize_t imm)
+{
+ NYI_WASM("emitIns_I");
+}
+
+void emitter::emitIns_R(instruction ins, emitAttr attr, regNumber reg)
+{
+ NYI_WASM("emitIns_R");
+}
+
+void emitter::emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t imm)
+{
+ NYI_WASM("emitIns_R_I");
+}
+
+void emitter::emitIns_Mov(instruction ins, emitAttr attr, regNumber dstReg, regNumber srcReg, bool canSkip)
+{
+ NYI_WASM("emitIns_Mov");
+}
+
+void emitter::emitIns_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2)
+{
+ NYI_WASM("emitIns_R_R");
+}
+
+void emitter::emitIns_S_R(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs)
+{
+ NYI_WASM("emitIns_S_R");
+}
+
+bool emitter::emitInsIsStore(instruction ins)
+{
+ NYI_WASM("emitInsIsStore");
+ return false;
+}
+
+size_t emitter::emitSizeOfInsDsc(instrDesc* id) const
+{
+ NYI_WASM("emitSizeOfInsDsc"); // Note this should return the size of the "id" structure itself.
+ return 0;
+}
+
+void emitter::emitSetShortJump(instrDescJmp* id)
+{
+ NYI_WASM("emitSetShortJump");
+}
+
+size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
+{
+ NYI_WASM("emitOutputInstr");
+ return 0;
+}
+
+void emitter::emitDispIns(
+ instrDesc* id, bool isNew, bool doffs, bool asmfm, unsigned offset, BYTE* pCode, size_t sz, insGroup* ig)
+{
+ NYI_WASM("emitDispIns");
+}
+
+#if defined(DEBUG) || defined(LATE_DISASM)
+emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(instrDesc* id)
+{
+ NYI_WASM("getInsExecutionCharacteristics");
+ return {};
+}
+#endif // defined(DEBUG) || defined(LATE_DISASM)
+
+#ifdef DEBUG
+void emitter::emitInsSanityCheck(instrDesc* id)
+{
+ NYI_WASM("emitInsSanityCheck");
+}
+#endif // DEBUG
diff --git a/src/coreclr/jit/emitwasm.h b/src/coreclr/jit/emitwasm.h
new file mode 100644
index 00000000000000..3a35b189f5134c
--- /dev/null
+++ b/src/coreclr/jit/emitwasm.h
@@ -0,0 +1,48 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+/************************************************************************/
+/* Debug-only routines to display instructions */
+/************************************************************************/
+
+#if defined(DEBUG) || defined(LATE_DISASM)
+void getInsSveExecutionCharacteristics(instrDesc* id, insExecutionCharacteristics& result);
+#endif // defined(DEBUG) || defined(LATE_DISASM)
+
+/************************************************************************/
+/* The public entry points to output instructions */
+/************************************************************************/
+
+public:
+void emitIns(instruction ins);
+void emitIns_I(instruction ins, emitAttr attr, ssize_t imm);
+void emitIns_R(instruction ins, emitAttr attr, regNumber reg);
+
+void emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t imm);
+void emitIns_Mov(instruction ins, emitAttr attr, regNumber dstReg, regNumber srcReg, bool canSkip);
+void emitIns_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2);
+
+void emitIns_S_R(instruction ins, emitAttr attr, regNumber ireg, int varx, int offs);
+
+/************************************************************************/
+/* Private members that deal with target-dependent instr. descriptors */
+/************************************************************************/
+
+private:
+instrDesc* emitNewInstrCallDir(
+ int argCnt, VARSET_VALARG_TP GCvars, regMaskTP gcrefRegs, regMaskTP byrefRegs, emitAttr retSize, bool hasAsyncRet);
+
+instrDesc* emitNewInstrCallInd(int argCnt,
+ ssize_t disp,
+ VARSET_VALARG_TP GCvars,
+ regMaskTP gcrefRegs,
+ regMaskTP byrefRegs,
+ emitAttr retSize,
+ bool hasAsyncRet);
+
+/************************************************************************/
+/* Private helpers for instruction output */
+/************************************************************************/
+
+private:
+bool emitInsIsStore(instruction ins);
diff --git a/src/coreclr/jit/error.h b/src/coreclr/jit/error.h
index 7eb8906f0ff7db..8bbd5cac3889e9 100644
--- a/src/coreclr/jit/error.h
+++ b/src/coreclr/jit/error.h
@@ -167,6 +167,7 @@ extern void notYetImplemented(const char* msg, const char* file, unsigned line);
#define NYI_ARM64(msg) do { } while (0)
#define NYI_LOONGARCH64(msg) do { } while (0)
#define NYI_RISCV64(msg) do { } while (0)
+#define NYI_WASM(msg) do { } while (0)
#elif defined(TARGET_X86)
@@ -176,6 +177,7 @@ extern void notYetImplemented(const char* msg, const char* file, unsigned line);
#define NYI_ARM64(msg) do { } while (0)
#define NYI_LOONGARCH64(msg) do { } while (0)
#define NYI_RISCV64(msg) do { } while (0)
+#define NYI_WASM(msg) do { } while (0)
#elif defined(TARGET_ARM)
@@ -185,6 +187,7 @@ extern void notYetImplemented(const char* msg, const char* file, unsigned line);
#define NYI_ARM64(msg) do { } while (0)
#define NYI_LOONGARCH64(msg) do { } while (0)
#define NYI_RISCV64(msg) do { } while (0)
+#define NYI_WASM(msg) do { } while (0)
#elif defined(TARGET_ARM64)
@@ -194,6 +197,7 @@ extern void notYetImplemented(const char* msg, const char* file, unsigned line);
#define NYI_ARM64(msg) NYIRAW("NYI_ARM64: " msg)
#define NYI_LOONGARCH64(msg) do { } while (0)
#define NYI_RISCV64(msg) do { } while (0)
+#define NYI_WASM(msg) do { } while (0)
#elif defined(TARGET_LOONGARCH64)
#define NYI_AMD64(msg) do { } while (0)
@@ -202,6 +206,7 @@ extern void notYetImplemented(const char* msg, const char* file, unsigned line);
#define NYI_ARM64(msg) do { } while (0)
#define NYI_LOONGARCH64(msg) NYIRAW("NYI_LOONGARCH64: " msg)
#define NYI_RISCV64(msg) do { } while (0)
+#define NYI_WASM(msg) do { } while (0)
#elif defined(TARGET_RISCV64)
#define NYI_AMD64(msg) do { } while (0)
@@ -210,6 +215,16 @@ extern void notYetImplemented(const char* msg, const char* file, unsigned line);
#define NYI_ARM64(msg) do { } while (0)
#define NYI_LOONGARCH64(msg) do { } while (0)
#define NYI_RISCV64(msg) NYIRAW("NYI_RISCV64: " msg)
+#define NYI_WASM(msg) do { } while (0)
+
+#elif defined(TARGET_WASM)
+#define NYI_AMD64(msg) do { } while (0)
+#define NYI_X86(msg) do { } while (0)
+#define NYI_ARM(msg) do { } while (0)
+#define NYI_ARM64(msg) do { } while (0)
+#define NYI_LOONGARCH64(msg) do { } while (0)
+#define NYI_RISCV64(msg) do { } while (0)
+#define NYI_WASM(msg) NYIRAW("NYI_WASM: " msg)
#else
diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp
index 262277983af084..6ff8fb43b93d75 100644
--- a/src/coreclr/jit/flowgraph.cpp
+++ b/src/coreclr/jit/flowgraph.cpp
@@ -1420,15 +1420,6 @@ void Compiler::fgAddSyncMethodEnterExit()
// We need to update the bbPreds lists.
assert(fgPredsComputed);
-#if !FEATURE_EH
- // If we don't support EH, we can't add the EH needed by synchronized methods.
- // Of course, we could simply ignore adding the EH constructs, since we don't
- // support exceptions being thrown in this mode, but we would still need to add
- // the monitor enter/exit, and that doesn't seem worth it for this minor case.
- // By the time EH is working, we can just enable the whole thing.
- NYI("No support for synchronized methods");
-#endif // !FEATURE_EH
-
// Create a block for the start of the try region, where the monitor enter call
// will go.
BasicBlock* const tryBegBB = fgSplitBlockAtBeginning(fgFirstBB);
diff --git a/src/coreclr/jit/gcinfo.cpp b/src/coreclr/jit/gcinfo.cpp
index ce0c9bbf187f82..b68c662c9fccb4 100644
--- a/src/coreclr/jit/gcinfo.cpp
+++ b/src/coreclr/jit/gcinfo.cpp
@@ -19,19 +19,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "emit.h"
#include "jitgcinfo.h"
-#ifdef TARGET_AMD64
-#include "gcinfoencoder.h" //this includes a LOT of other files too
-#endif
-
-/*****************************************************************************/
-/*****************************************************************************/
-
-/*****************************************************************************/
-
-extern int JITGcBarrierCall;
-
-/*****************************************************************************/
-
#if MEASURE_PTRTAB_SIZE
/* static */ size_t GCInfo::s_gcRegPtrDscSize = 0;
/* static */ size_t GCInfo::s_gcTotalPtrTabSize = 0;
@@ -57,12 +44,15 @@ GCInfo::GCInfo(Compiler* theCompiler)
gcPtrArgCnt = 0;
gcCallDescList = nullptr;
gcCallDescLast = nullptr;
+#if EMIT_GENERATE_GCINFO
#ifdef JIT32_GCENCODER
+ // TODO-WASM-Factoring: exclude this whole file from the wasm build by factoring out write barrier selection.
gcEpilogTable = nullptr;
#else // !JIT32_GCENCODER
m_regSlotMap = nullptr;
m_stackSlotMap = nullptr;
#endif // JIT32_GCENCODER
+#endif // EMIT_GENERATE_GCINFO
}
/*****************************************************************************/
@@ -206,6 +196,7 @@ void GCInfo::gcMarkRegSetNpt(regMaskTP regMask DEBUGARG(bool forceOutput))
void GCInfo::gcMarkRegPtrVal(regNumber reg, var_types type)
{
+#if EMIT_GENERATE_GCINFO
regMaskTP regMask = genRegMask(reg);
switch (type)
@@ -220,6 +211,7 @@ void GCInfo::gcMarkRegPtrVal(regNumber reg, var_types type)
gcMarkRegSetNpt(regMask);
break;
}
+#endif // EMIT_GENERATE_GCINFO
}
//------------------------------------------------------------------------
@@ -733,73 +725,3 @@ void GCInfo::gcRegPtrSetInit()
}
#endif // JIT32_GCENCODER
-
-//------------------------------------------------------------------------
-// gcUpdateForRegVarMove: Update the masks when a variable is moved
-//
-// Arguments:
-// srcMask - The register mask for the register(s) from which it is being moved
-// dstMask - The register mask for the register(s) to which it is being moved
-// type - The type of the variable
-//
-// Return Value:
-// None
-//
-// Notes:
-// This is called during codegen when a var is moved due to an LSRA_ASG.
-// It is also called by LinearScan::recordVarLocationAtStartOfBB() which is in turn called by
-// CodeGen::genCodeForBBList() at the block boundary.
-
-void GCInfo::gcUpdateForRegVarMove(regMaskTP srcMask, regMaskTP dstMask, LclVarDsc* varDsc)
-{
- var_types type = varDsc->TypeGet();
- bool isGCRef = (type == TYP_REF);
- bool isByRef = (type == TYP_BYREF);
-
- if (srcMask != RBM_NONE)
- {
- regSet->RemoveMaskVars(srcMask);
- if (isGCRef)
- {
- assert((gcRegByrefSetCur & srcMask) == 0);
- gcRegGCrefSetCur &= ~srcMask;
- gcRegGCrefSetCur |= dstMask; // safe if no dst, i.e. RBM_NONE
- }
- else if (isByRef)
- {
- assert((gcRegGCrefSetCur & srcMask) == 0);
- gcRegByrefSetCur &= ~srcMask;
- gcRegByrefSetCur |= dstMask; // safe if no dst, i.e. RBM_NONE
- }
- }
- else if (isGCRef || isByRef)
- {
- // In this case, we are moving it from the stack to a register,
- // so remove it from the set of live stack gc refs
- VarSetOps::RemoveElemD(compiler, gcVarPtrSetCur, varDsc->lvVarIndex);
- }
- if (dstMask != RBM_NONE)
- {
- regSet->AddMaskVars(dstMask);
- // If the source is a reg, then the gc sets have been set appropriately
- // Otherwise, we have to determine whether to set them
- if (srcMask == RBM_NONE)
- {
- if (isGCRef)
- {
- gcRegGCrefSetCur |= dstMask;
- }
- else if (isByRef)
- {
- gcRegByrefSetCur |= dstMask;
- }
- }
- }
- else if (isGCRef || isByRef)
- {
- VarSetOps::AddElemD(compiler, gcVarPtrSetCur, varDsc->lvVarIndex);
- }
-}
-
-/*****************************************************************************/
-/*****************************************************************************/
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index 0384b257934c7d..8e7476a854d73a 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -1006,6 +1006,7 @@ bool GenTree::NeedsConsecutiveRegisters() const
}
#endif
+#if HAS_FIXED_REGISTER_SET
//---------------------------------------------------------------
// gtGetContainedRegMask: Get the reg mask of the node including
// contained nodes (recursive).
@@ -1076,6 +1077,7 @@ regMaskTP GenTree::gtGetRegMask() const
return resultMask;
}
+#endif // HAS_FIXED_REGISTER_SET
void GenTreeFieldList::AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type)
{
@@ -4750,6 +4752,8 @@ bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_typ
addrModeCostSz += 4;
}
}
+#elif defined(TARGET_WASM)
+ NYI_WASM("gtMarkAddrMode");
#else
#error "Unknown TARGET"
#endif
@@ -5154,6 +5158,19 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
costEx = 1;
costSz = 4;
goto COMMON_CNS;
+#elif defined(TARGET_WASM)
+ case GT_CNS_STR:
+ costEx = IND_COST_EX + 2;
+ costSz = 7;
+ goto COMMON_CNS;
+
+ case GT_CNS_LNG:
+ case GT_CNS_INT:
+ // TODO-WASM: needs tuning based on the [S]LEB128 encoding size.
+ NYI_WASM("GT_CNS_LNG/GT_CNS_INT costing");
+ costEx = 0;
+ costSz = 0;
+ goto COMMON_CNS;
#else
case GT_CNS_STR:
case GT_CNS_LNG:
@@ -5231,6 +5248,9 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
// TODO-RISCV64-CQ: tune the costs.
costEx = 2;
costSz = 8;
+#elif defined(TARGET_WASM)
+ costEx = 2;
+ costSz = tree->TypeIs(TYP_FLOAT) ? 5 : 9;
#else
#error "Unknown TARGET"
#endif
@@ -5413,6 +5433,11 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
// TODO-RISCV64-CQ: tune the costs.
costEx = 1;
costSz = 4;
+#elif defined(TARGET_WASM)
+ // TODO-WASM: 1 byte opcodes except for the int->fp saturating casts which are 2 bytes.
+ NYI_WASM("Cast costing");
+ costEx = 0;
+ costSz = 0;
#else
#error "Unknown TARGET"
#endif
@@ -11688,14 +11713,15 @@ void Compiler::gtDispNode(GenTree* tree, IndentStack* indentStack, _In_ _In_opt_
}
}
+#if HAS_FIXED_REGISTER_SET
// for tracking down problems in reguse prediction or liveness tracking
-
if (verbose && 0)
{
printf(" RR=");
dspRegMask(JitTls::GetCompiler()->codeGen->internalRegisters.GetAll(tree));
printf("\n");
}
+#endif // HAS_FIXED_REGISTER_SET
}
}
@@ -13346,6 +13372,7 @@ void Compiler::gtPrintABILocation(const ABIPassingInformation& abiInfo, char** b
{
if (segment.IsPassedInRegister())
{
+#if HAS_FIXED_REGISTER_SET
regMaskTP regs = segment.GetRegisterMask();
while (regs != RBM_NONE)
{
@@ -13366,6 +13393,10 @@ void Compiler::gtPrintABILocation(const ABIPassingInformation& abiInfo, char** b
lastReg = reg;
}
}
+#else // !HAS_FIXED_REGISTER_SET
+ // TODO-WASM: refactor this code to not rely on register masks.
+ NYI_WASM("gtPrintABILocation");
+#endif // !HAS_FIXED_REGISTER_SET
}
else
{
@@ -31299,6 +31330,7 @@ regNumber ReturnTypeDesc::GetABIReturnReg(unsigned idx, CorInfoCallConvExtension
return resultReg;
}
+#if HAS_FIXED_REGISTER_SET
//--------------------------------------------------------------------------------
// GetABIReturnRegs: get the mask of return registers as per target arch ABI.
//
@@ -31325,6 +31357,7 @@ regMaskTP ReturnTypeDesc::GetABIReturnRegs(CorInfoCallConvExtension callConv) co
return resultMask;
}
+#endif // HAS_FIXED_REGISTER_SET
//------------------------------------------------------------------------
// GetNum: Get the SSA number for a given field.
diff --git a/src/coreclr/jit/instr.cpp b/src/coreclr/jit/instr.cpp
index 5e918a8481823f..77e0e0427b4183 100644
--- a/src/coreclr/jit/instr.cpp
+++ b/src/coreclr/jit/instr.cpp
@@ -32,8 +32,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
const char* CodeGen::genInsName(instruction ins)
{
// clang-format off
- static
- const char * const insNames[] =
+ static const char * const insNames[] =
{
#if defined(TARGET_XARCH)
#define INST0(id, nm, um, mr, lat, tp, tt, flags) nm,
@@ -86,6 +85,10 @@ const char* CodeGen::genInsName(instruction ins)
#define INST(id, nm, ldst, e1) nm,
#include "instrs.h"
+#elif defined(TARGET_WASM)
+ #define INST(id, nm, info, opcode) nm,
+ #include "instrs.h"
+
#else
#error "Unknown TARGET"
#endif
@@ -2093,6 +2096,7 @@ instruction CodeGen::ins_Move_Extend(var_types srcType, bool srcInReg)
*/
instruction CodeGenInterface::ins_Load(var_types srcType, bool aligned /*=false*/)
{
+ // TODO-Cleanup: split this function across target-specific files (e. g. emit.cpp).
if (varTypeUsesIntReg(srcType))
{
instruction ins = INS_invalid;
@@ -2248,6 +2252,7 @@ instruction CodeGenInterface::ins_Load(var_types srcType, bool aligned /*=false*
}
#else
NYI("ins_Load");
+ return INS_none;
#endif
}
@@ -2260,6 +2265,7 @@ instruction CodeGenInterface::ins_Load(var_types srcType, bool aligned /*=false*
*/
instruction CodeGen::ins_Copy(var_types dstType)
{
+ // TODO-Cleanup: split this function across target-specific files (e. g. emit.cpp).
assert(emitTypeActSz[dstType] != 0);
if (varTypeUsesIntReg(dstType))
@@ -2325,6 +2331,7 @@ instruction CodeGen::ins_Copy(var_types dstType)
}
#else
NYI("ins_Copy");
+ return INS_none;
#endif
}
@@ -2342,6 +2349,7 @@ instruction CodeGen::ins_Copy(var_types dstType)
//
instruction CodeGen::ins_Copy(regNumber srcReg, var_types dstType)
{
+ // TODO-Cleanup: split this function across target-specific files (e. g. emit.cpp).
assert(srcReg != REG_NA);
if (varTypeUsesIntReg(dstType))
@@ -2455,6 +2463,7 @@ instruction CodeGen::ins_Copy(regNumber srcReg, var_types dstType)
}
#else
NYI("ins_Copy");
+ return INS_none;
#endif
}
@@ -2469,6 +2478,7 @@ instruction CodeGen::ins_Copy(regNumber srcReg, var_types dstType)
*/
instruction CodeGenInterface::ins_Store(var_types dstType, bool aligned /*=false*/)
{
+ // TODO-Cleanup: split this function across target-specific files (e. g. emit.cpp).
if (varTypeUsesIntReg(dstType))
{
instruction ins = INS_invalid;
@@ -2572,6 +2582,7 @@ instruction CodeGenInterface::ins_Store(var_types dstType, bool aligned /*=false
}
#else
NYI("ins_Store");
+ return INS_none;
#endif
}
@@ -2970,6 +2981,8 @@ void CodeGen::instGen_Set_Reg_To_Zero(emitAttr size, regNumber reg, insFlags fla
GetEmitter()->emitIns_R_R_I(INS_ori, size, reg, REG_R0, 0);
#elif defined(TARGET_RISCV64)
GetEmitter()->emitIns_R_R_I(INS_addi, size, reg, REG_R0, 0);
+#elif defined(TARGET_WASM)
+ NYI_WASM("instGen_Set_Reg_To_Zero");
#else
#error "Unknown TARGET"
#endif
diff --git a/src/coreclr/jit/instr.h b/src/coreclr/jit/instr.h
index 197e5b8cc41f86..cc9c5bb7ac2420 100644
--- a/src/coreclr/jit/instr.h
+++ b/src/coreclr/jit/instr.h
@@ -76,6 +76,10 @@ enum instruction : uint32_t
#include "instrs.h"
INS_lea, // Not a real instruction. It is used for load the address of stack locals
+#elif defined(TARGET_WASM)
+ #define INST(id, nm, info, opcode) INS_##id,
+ #include "instrs.h"
+
#else
#error Unsupported target architecture
#endif
@@ -312,7 +316,7 @@ enum insOpts: unsigned
};
-#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#elif defined(TARGET_ARM) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) || defined(TARGET_WASM)
// TODO-Cleanup: Move 'insFlags' under TARGET_ARM
enum insFlags: unsigned
{
@@ -563,7 +567,11 @@ enum insBarrier : unsigned
{
INS_BARRIER_FULL = 0x33,
};
-
+#elif defined(TARGET_WASM)
+enum insOpts : unsigned
+{
+ INS_OPTS_NONE,
+};
#endif
#if defined(TARGET_XARCH)
diff --git a/src/coreclr/jit/instrs.h b/src/coreclr/jit/instrs.h
index 1bbbd3f2367e2b..124c3cb134de03 100644
--- a/src/coreclr/jit/instrs.h
+++ b/src/coreclr/jit/instrs.h
@@ -11,6 +11,8 @@
#include "instrsloongarch64.h"
#elif defined(TARGET_RISCV64)
#include "instrsriscv64.h"
+#elif defined(TARGET_WASM)
+#include "instrswasm.h"
#else
#error Unsupported or unset target architecture
#endif // target type
diff --git a/src/coreclr/jit/instrswasm.h b/src/coreclr/jit/instrswasm.h
new file mode 100644
index 00000000000000..125ae5f6930a8b
--- /dev/null
+++ b/src/coreclr/jit/instrswasm.h
@@ -0,0 +1,31 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+/*****************************************************************************
+ * WASM instructions
+ *
+ * id -- the enum name for the instruction
+ * nm -- textual name (for assembly display)
+ * info -- miscellaneous instruction info
+ * encode -- encoding (modulo operands)
+ *
+ ******************************************************************************/
+
+#ifndef TARGET_WASM
+#error Unexpected target type
+#endif
+
+#ifndef INST
+#error INST must be defined before including this file.
+#endif
+
+// TODO-WASM: fill out with more instructions (and everything else needed).
+//
+// clang-format off
+INST(invalid, "INVALID", 0, BAD_CODE)
+INST(unreachable, "unreachable", 0, 0x00)
+INST(nop, "nop", 0, 0x01)
+INST(i32_add, "i32.add", 0, 0x6a)
+// clang-format on
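+
+// For illustration: a consumer that defines, say, "#define INST(id, nm, info, opcode) nm," turns the
+// table above into a name array ("INVALID", "unreachable", "nop", "i32.add"), while an "INS_##id"
+// definition turns it into the instruction enum. The opcode column holds the single-byte encoding
+// from the WASM binary format (0x6a is "i32.add").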
+
+#undef INST
diff --git a/src/coreclr/jit/jit.h b/src/coreclr/jit/jit.h
index d1e69950efc631..7d42a3223f0daf 100644
--- a/src/coreclr/jit/jit.h
+++ b/src/coreclr/jit/jit.h
@@ -145,6 +145,9 @@
#if defined(TARGET_RISCV64)
#error Cannot define both TARGET_X86 and TARGET_RISCV64
#endif
+#if defined(TARGET_WASM32)
+#error Cannot define both TARGET_X86 and TARGET_WASM32
+#endif
#elif defined(TARGET_AMD64)
#if defined(TARGET_X86)
#error Cannot define both TARGET_AMD64 and TARGET_X86
@@ -161,6 +164,9 @@
#if defined(TARGET_RISCV64)
#error Cannot define both TARGET_AMD64 and TARGET_RISCV64
#endif
+#if defined(TARGET_WASM32)
+#error Cannot define both TARGET_AMD64 and TARGET_WASM32
+#endif
#elif defined(TARGET_ARM)
#if defined(TARGET_X86)
#error Cannot define both TARGET_ARM and TARGET_X86
@@ -177,6 +183,9 @@
#if defined(TARGET_RISCV64)
#error Cannot define both TARGET_ARM and TARGET_RISCV64
#endif
+#if defined(TARGET_WASM32)
+#error Cannot define both TARGET_ARM and TARGET_WASM32
+#endif
#elif defined(TARGET_ARM64)
#if defined(TARGET_X86)
#error Cannot define both TARGET_ARM64 and TARGET_X86
@@ -193,6 +202,9 @@
#if defined(TARGET_RISCV64)
#error Cannot define both TARGET_ARM64 and TARGET_RISCV64
#endif
+#if defined(TARGET_WASM32)
+#error Cannot define both TARGET_ARM64 and TARGET_WASM32
+#endif
#elif defined(TARGET_LOONGARCH64)
#if defined(TARGET_X86)
#error Cannot define both TARGET_LOONGARCH64 and TARGET_X86
@@ -209,6 +221,9 @@
#if defined(TARGET_RISCV64)
#error Cannot define both TARGET_LOONGARCH64 and TARGET_RISCV64
#endif
+#if defined(TARGET_WASM32)
+#error Cannot define both TARGET_LOONGARCH64 and TARGET_WASM32
+#endif
#elif defined(TARGET_RISCV64)
#if defined(TARGET_X86)
#error Cannot define both TARGET_RISCV64 and TARGET_X86
@@ -225,6 +240,29 @@
#if defined(TARGET_LOONGARCH64)
#error Cannot define both TARGET_RISCV64 and TARGET_LOONGARCH64
#endif
+#if defined(TARGET_WASM32)
+#error Cannot define both TARGET_RISCV64 and TARGET_WASM32
+#endif
+
+#elif defined(TARGET_WASM32)
+#if defined(TARGET_X86)
+#error Cannot define both TARGET_WASM32 and TARGET_X86
+#endif
+#if defined(TARGET_AMD64)
+#error Cannot define both TARGET_WASM32 and TARGET_AMD64
+#endif
+#if defined(TARGET_ARM)
+#error Cannot define both TARGET_WASM32 and TARGET_ARM
+#endif
+#if defined(TARGET_ARM64)
+#error Cannot define both TARGET_WASM32 and TARGET_ARM64
+#endif
+#if defined(TARGET_LOONGARCH64)
+#error Cannot define both TARGET_WASM32 and TARGET_LOONGARCH64
+#endif
+#if defined(TARGET_RISCV64)
+#error Cannot define both TARGET_WASM32 and TARGET_RISCV64
+#endif
#else
#error Unsupported or unset target architecture
@@ -237,6 +275,9 @@
#ifdef TARGET_ARM
#error Cannot define both TARGET_ARM and TARGET_64BIT
#endif // TARGET_ARM
+#ifdef TARGET_WASM32
+#error Cannot define both TARGET_WASM32 and TARGET_64BIT
+#endif // TARGET_WASM32
#endif // TARGET_64BIT
#if defined(TARGET_X86) || defined(TARGET_AMD64)
@@ -277,6 +318,11 @@
#define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_LOONGARCH64 // 0x6264
#elif defined(TARGET_RISCV64)
#define IMAGE_FILE_MACHINE_TARGET IMAGE_FILE_MACHINE_RISCV64 // 0x5064
+#elif defined(TARGET_WASM32)
+// TODO-WASM: refactor the Jit-EE interface (getExpectedTargetArchitecture) to stop relying on constants from the PE
+// spec for this.
+// TODO-Cleanup: move the defines to target.h files.
+#define IMAGE_FILE_MACHINE_TARGET 0
#else
#error Unsupported or unset target architecture
#endif
diff --git a/src/coreclr/jit/jitgcinfo.h b/src/coreclr/jit/jitgcinfo.h
index fa5500548a6b42..5cb226c02db3b8 100644
--- a/src/coreclr/jit/jitgcinfo.h
+++ b/src/coreclr/jit/jitgcinfo.h
@@ -8,14 +8,14 @@
#ifndef _JITGCINFO_H_
#define _JITGCINFO_H_
+// TODO-WASM-Factoring: don't include this header in the WASM build by factoring out write barrier selection.
+#if EMIT_GENERATE_GCINFO
#include "gcinfotypes.h"
#ifndef JIT32_GCENCODER
#include "gcinfoencoder.h"
#endif
-/*****************************************************************************/
-
#ifndef JIT32_GCENCODER
// Shash typedefs
struct RegSlotIdKey
@@ -79,6 +79,7 @@ typedef JitHashTable StackSlotMap;
#endif
typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, VARSET_TP*> NodeToVarsetPtrMap;
+#endif // EMIT_GENERATE_GCINFO
class GCInfo
{
@@ -203,6 +204,7 @@ class GCInfo
regPtrDsc* gcRegPtrLast;
unsigned gcPtrArgCnt;
+#if EMIT_GENERATE_GCINFO
#ifndef JIT32_GCENCODER
enum MakeRegPtrMode
{
@@ -239,8 +241,8 @@ class GCInfo
unsigned instrOffset,
regPtrDsc* genStackPtrFirst,
regPtrDsc* genStackPtrLast);
-
#endif
+#endif // EMIT_GENERATE_GCINFO
#if MEASURE_PTRTAB_SIZE
static size_t s_gcRegPtrDscSize;
@@ -286,8 +288,7 @@ class GCInfo
CallDsc* gcCallDescList;
CallDsc* gcCallDescLast;
- //-------------------------------------------------------------------------
-
+#if EMIT_GENERATE_GCINFO
#ifdef JIT32_GCENCODER
void gcCountForHeader(UNALIGNED unsigned int* pUntrackedCount,
UNALIGNED unsigned int* pVarPtrTableSize,
@@ -310,13 +311,13 @@ class GCInfo
MakeRegPtrMode mode,
unsigned* callCntRef);
#endif
+#endif // EMIT_GENERATE_GCINFO
#ifdef JIT32_GCENCODER
size_t gcPtrTableSize(const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset);
BYTE* gcPtrTableSave(BYTE* destPtr, const InfoHdr& header, unsigned codeSize, size_t* pArgTabOffset);
#endif
void gcRegPtrSetInit();
- /*****************************************************************************/
// This enumeration yields the result of the analysis below, whether a store
// requires a write barrier:
@@ -346,6 +347,7 @@ class GCInfo
// These record the info about the procedure in the info-block
//
+#if EMIT_GENERATE_GCINFO
#ifdef JIT32_GCENCODER
private:
BYTE* gcEpilogTable;
@@ -367,10 +369,10 @@ class GCInfo
static size_t gcRecordEpilog(void* pCallBackData, unsigned offset);
ReturnKind getReturnKind();
-#else // JIT32_GCENCODER
+#else // !JIT32_GCENCODER
void gcInfoBlockHdrSave(GcInfoEncoder* gcInfoEncoder, unsigned methodSize, unsigned prologSize);
-
-#endif // JIT32_GCENCODER
+#endif // !JIT32_GCENCODER
+#endif // EMIT_GENERATE_GCINFO
// This method expands the tracked stack variables lifetimes so that any lifetimes within filters
// are reported as pinned.
@@ -396,10 +398,6 @@ class GCInfo
#endif // JIT32_GCENCODER
#endif // DUMP_GC_TABLES
-
-public:
- // This method updates the appropriate reg masks when a variable is moved.
- void gcUpdateForRegVarMove(regMaskTP srcMask, regMaskTP dstMask, LclVarDsc* varDsc);
};
inline unsigned char encodeUnsigned(BYTE* dest, unsigned value)
diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp
index 4e6b559c989baf..c5b4291ff2c99d 100644
--- a/src/coreclr/jit/lclvars.cpp
+++ b/src/coreclr/jit/lclvars.cpp
@@ -812,7 +812,6 @@ void Compiler::lvaClassifyParameterABI(Classifier& classifier)
lvaParameterPassingInfo =
info.compArgsCount == 0 ? nullptr : new (this, CMK_LvaTable) ABIPassingInformation[info.compArgsCount];
- regMaskTP argRegs = RBM_NONE;
for (unsigned i = 0; i < info.compArgsCount; i++)
{
LclVarDsc* dsc = lvaGetDesc(i);
@@ -853,7 +852,6 @@ void Compiler::lvaClassifyParameterABI(Classifier& classifier)
{
if (segment.IsPassedInRegister())
{
- argRegs |= segment.GetRegisterMask();
numRegisters++;
}
}
@@ -864,11 +862,6 @@ void Compiler::lvaClassifyParameterABI(Classifier& classifier)
lvaParameterStackSize = classifier.StackSize();
- // genFnPrologCalleeRegArgs expect these to be the counts of registers it knows how to handle.
- // TODO-Cleanup: Recompute these values in the backend instead, where they are used.
- codeGen->intRegState.rsCalleeRegArgCount = genCountBits(argRegs & RBM_ARG_REGS);
- codeGen->floatRegState.rsCalleeRegArgCount = genCountBits(argRegs & RBM_FLTARG_REGS);
-
#ifdef TARGET_ARM
// Prespill all argument regs on to stack in case of Arm when under profiler.
// We do this as the arm32 CORINFO_HELP_FCN_ENTER helper does not preserve
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 25e5db40c20c6c..f6c0e8634e36bf 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -4521,7 +4521,7 @@ GenTree* Lowering::LowerCompare(GenTree* cmp)
return cmp->gtNext;
}
-#if !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64)
+#if !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64) && !defined(TARGET_WASM)
//------------------------------------------------------------------------
// Lowering::LowerJTrue: Lowers a JTRUE node.
//
@@ -4602,7 +4602,7 @@ GenTree* Lowering::LowerJTrue(GenTreeOp* jtrue)
return nullptr;
}
-#endif // !TARGET_LOONGARCH64 && !TARGET_RISCV64
+#endif // !TARGET_LOONGARCH64 && !TARGET_RISCV64 && !TARGET_WASM
//----------------------------------------------------------------------------------------------
// LowerSelect: Lower a GT_SELECT node.
@@ -4735,7 +4735,10 @@ bool Lowering::TryLowerConditionToFlagsNode(GenTree* parent,
}
#endif
-#if !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64)
+#if !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64) && !defined(TARGET_WASM)
+ // TODO-Cleanup: this ifdef looks suspect; we should never get here on architectures without a status register,
+ // i.e. the right thing is to ifdef out the whole function.
+ // TODO-Cleanup: introduce a "has CPU flags" target define.
if (!allowMultipleFlagsChecks)
{
const GenConditionDesc& desc = GenConditionDesc::Get(*cond);
@@ -4785,7 +4788,7 @@ bool Lowering::TryLowerConditionToFlagsNode(GenTree* parent,
{
assert((condition->gtPrev->gtFlags & GTF_SET_FLAGS) != 0);
GenTree* flagsDef = condition->gtPrev;
-#if defined(TARGET_ARM64) || defined(TARGET_AMD64)
+#if (defined(TARGET_ARM64) || defined(TARGET_AMD64)) && !defined(TARGET_WASM)
// CCMP is a flag producing node that also consumes flags, so find the
// "root" of the flags producers and move the entire range.
// We limit this to 10 nodes look back to avoid quadratic behavior.
@@ -4802,7 +4805,7 @@ bool Lowering::TryLowerConditionToFlagsNode(GenTree* parent,
*cond = condition->AsCC()->gtCondition;
-#if !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64)
+#if !defined(TARGET_LOONGARCH64) && !defined(TARGET_RISCV64) && !defined(TARGET_WASM)
if (!allowMultipleFlagsChecks)
{
const GenConditionDesc& desc = GenConditionDesc::Get(*cond);
@@ -8195,6 +8198,10 @@ bool Lowering::TryLowerConstIntDivOrMod(GenTree* node, GenTree** nextNode)
#elif defined(TARGET_ARM)
// Currently there's no GT_MULHI for ARM32
return false;
+#elif defined(TARGET_WASM)
+ // TODO-WASM: it is not clear this whole transformation is profitable on WASM, since it increases
+ // code size while the WASM VM should be perfectly capable of expanding the DIV/MOD by itself.
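+ // (For example, expanding a constant division like "x / 10u" into a MULHI/shift sequence would replace
+ // a single "i32.div_u" with several instructions that the engine's backend can likely produce on its own.)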
+ NYI_WASM("Lowering::TryLowerConstIntDivOrMod");
#else
#error Unsupported or unset target architecture
#endif
@@ -8471,13 +8478,13 @@ PhaseStatus Lowering::DoPhase()
InsertPInvokeMethodProlog();
}
-#if !defined(TARGET_64BIT)
+#if LOWER_DECOMPOSE_LONGS
DecomposeLongs decomp(comp, this); // Initialize the long decomposition class.
if (comp->compLongUsed)
{
decomp.PrepareForDecomposition();
}
-#endif // !defined(TARGET_64BIT)
+#endif // LOWER_DECOMPOSE_LONGS
if (!comp->compEnregLocals())
{
@@ -8499,7 +8506,7 @@ PhaseStatus Lowering::DoPhase()
/* Make the block publicly available */
comp->compCurBB = block;
-#if !defined(TARGET_64BIT)
+#if LOWER_DECOMPOSE_LONGS
if (comp->compLongUsed)
{
decomp.DecomposeBlock(block);
@@ -11237,7 +11244,7 @@ void Lowering::TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, Bas
ind->ChangeType(comp->gtTypeForNullCheck(ind));
-#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) || defined(TARGET_WASM)
bool useNullCheck = true;
#elif defined(TARGET_ARM)
bool useNullCheck = false;
diff --git a/src/coreclr/jit/lower.h b/src/coreclr/jit/lower.h
index 30d98f6880d51b..5c4d356b737a0e 100644
--- a/src/coreclr/jit/lower.h
+++ b/src/coreclr/jit/lower.h
@@ -16,9 +16,16 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#include "compiler.h"
#include "phase.h"
-#include "lsra.h"
#include "sideeffects.h"
+#if HAS_FIXED_REGISTER_SET
+#include "lsra.h"
+#endif
+
+#ifdef TARGET_WASM
+#include "regallocwasm.h"
+#endif
+
class Lowering final : public Phase
{
public:
diff --git a/src/coreclr/jit/lowerwasm.cpp b/src/coreclr/jit/lowerwasm.cpp
new file mode 100644
index 00000000000000..dd63a8fb17eb54
--- /dev/null
+++ b/src/coreclr/jit/lowerwasm.cpp
@@ -0,0 +1,320 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XX XX
+XX Lowering WASM XX
+XX XX
+XX This encapsulates all the logic for lowering trees for the WebAssembly XX
+XX architecture. For a more detailed view of what is lowering, please XX
+XX take a look at Lower.cpp XX
+XX XX
+XX XX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+*/
+
+#include "jitpch.h"
+#ifdef _MSC_VER
+#pragma hdrstop
+#endif
+
+#include "lower.h"
+
+//------------------------------------------------------------------------
+// IsCallTargetInRange: Can a call target address be encoded in-place?
+//
+// Return Value:
+// Always true since there are no encoding range considerations on WASM.
+//
+bool Lowering::IsCallTargetInRange(void* addr)
+{
+ return true;
+}
+
+//------------------------------------------------------------------------
+// IsContainableImmed: Is an immediate encodable in-place?
+//
+// Return Value:
+// True if the immediate can be folded into an instruction,
+// for example small enough and non-relocatable.
+//
+bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) const
+{
+ return false;
+}
+
+//------------------------------------------------------------------------
+// LowerStoreLoc: Lower a store of a lclVar
+//
+// Arguments:
+// storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
+//
+// Notes:
+//    Currently this only runs the containment check; no widening of small stores is needed on WASM.
+//
+// Returns:
+// Next node to lower.
+//
+GenTree* Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
+{
+ if (storeLoc->OperIs(GT_STORE_LCL_FLD))
+ {
+ // We should only encounter this for lclVars that are lvDoNotEnregister.
+ verifyLclFldDoNotEnregister(storeLoc->GetLclNum());
+ }
+
+ ContainCheckStoreLoc(storeLoc);
+ return storeLoc->gtNext;
+}
+
+//------------------------------------------------------------------------
+// LowerStoreIndir: Determine addressing mode for an indirection, and whether operands are contained.
+//
+// Arguments:
+// node - The indirect store node (GT_STORE_IND) of interest
+//
+// Return Value:
+// Next node to lower.
+//
+GenTree* Lowering::LowerStoreIndir(GenTreeStoreInd* node)
+{
+ ContainCheckStoreIndir(node);
+ return node->gtNext;
+}
+
+//------------------------------------------------------------------------
+// LowerMul: Lower a GT_MUL node.
+//
+// Arguments:
+// mul - The node to lower
+//
+// Return Value:
+// The next node to lower.
+//
+GenTree* Lowering::LowerMul(GenTreeOp* mul)
+{
+ assert(mul->OperIs(GT_MUL));
+ ContainCheckMul(mul);
+ return mul->gtNext;
+}
+
+//------------------------------------------------------------------------
+// Lowering::LowerJTrue: Lowers a JTRUE node.
+//
+// Arguments:
+// jtrue - the JTRUE node
+//
+// Return Value:
+// The next node to lower.
+//
+GenTree* Lowering::LowerJTrue(GenTreeOp* jtrue)
+{
+ NYI_WASM("LowerJTrue");
+ return jtrue->gtNext;
+}
+
+//------------------------------------------------------------------------
+// LowerBinaryArithmetic: lowers the given binary arithmetic node.
+//
+// Arguments:
+// binOp - the arithmetic node to lower
+//
+// Returns:
+// The next node to lower.
+//
+GenTree* Lowering::LowerBinaryArithmetic(GenTreeOp* binOp)
+{
+ ContainCheckBinary(binOp);
+ return binOp->gtNext;
+}
+
+//------------------------------------------------------------------------
+// LowerBlockStore: Lower a block store node
+//
+// Arguments:
+// blkNode - The block store node to lower
+//
+void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
+{
+ NYI_WASM("LowerBlockStore");
+}
+
+//------------------------------------------------------------------------
+// LowerPutArgStk: Lower a GT_PUTARG_STK.
+//
+// Arguments:
+// putArgNode - The node to lower
+//
+void Lowering::LowerPutArgStk(GenTreePutArgStk* putArgNode)
+{
+ unreached(); // Currently no stack args on WASM.
+}
+
+//------------------------------------------------------------------------
+// LowerCast: Lower GT_CAST(srcType, DstType) nodes.
+//
+// Arguments:
+// tree - GT_CAST node to be lowered
+//
+// Return Value:
+// None.
+//
+void Lowering::LowerCast(GenTree* tree)
+{
+ assert(tree->OperIs(GT_CAST));
+ ContainCheckCast(tree->AsCast());
+}
+
+//------------------------------------------------------------------------
+// LowerRotate: Lower GT_ROL and GT_ROR nodes.
+//
+// Arguments:
+// tree - the node to lower
+//
+// Return Value:
+// None.
+//
+void Lowering::LowerRotate(GenTree* tree)
+{
+ ContainCheckShiftRotate(tree->AsOp());
+}
+
+//------------------------------------------------------------------------
+// ContainCheckCallOperands: Determine whether operands of a call should be contained.
+//
+// Arguments:
+// call - The call node of interest
+//
+// Return Value:
+// None.
+//
+void Lowering::ContainCheckCallOperands(GenTreeCall* call)
+{
+}
+
+//------------------------------------------------------------------------
+// ContainCheckStoreIndir: determine whether the sources of a STOREIND node should be contained.
+//
+// Arguments:
+// node - pointer to the node
+//
+void Lowering::ContainCheckStoreIndir(GenTreeStoreInd* node)
+{
+ ContainCheckIndir(node);
+}
+
+//------------------------------------------------------------------------
+// ContainCheckIndir: Determine whether operands of an indir should be contained.
+//
+// Arguments:
+// indirNode - The indirection node of interest
+//
+// Notes:
+// This is called for both store and load indirections.
+//
+// Return Value:
+// None.
+//
+void Lowering::ContainCheckIndir(GenTreeIndir* indirNode)
+{
+ // If this is the rhs of a block copy it will be handled when we handle the store.
+ if (indirNode->TypeIs(TYP_STRUCT))
+ {
+ return;
+ }
+
+ // TODO-WASM-CQ: contain suitable LEAs here. Take note of the fact that for this to be correct we must prove the
+ // LEA doesn't overflow. It will involve creating a new frontend node to represent "nuw" (offset) addition.
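+ // For example, folding "base + 0xFFFFFFF0" into "i32.load offset=0xFFFFFFF0" is not equivalent to an
+ // explicit "i32.add" followed by a load on wasm32: the add wraps modulo 2^32, while the memarg offset
+ // participates in a non-wrapping, bounds-checked effective-address computation.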
+ GenTree* addr = indirNode->Addr();
+ if (addr->OperIs(GT_LCL_ADDR) && IsContainableLclAddr(addr->AsLclFld(), indirNode->Size()))
+ {
+ // These nodes go into an addr mode:
+ // - GT_LCL_ADDR is a stack addr mode.
+ MakeSrcContained(indirNode, addr);
+ }
+}
+
+//------------------------------------------------------------------------
+// ContainCheckBinary: Determine whether a binary op's operands should be contained.
+//
+// Arguments:
+// node - the node we care about
+//
+void Lowering::ContainCheckBinary(GenTreeOp* node)
+{
+}
+
+//------------------------------------------------------------------------
+// ContainCheckMul: Determine whether a mul op's operands should be contained.
+//
+// Arguments:
+// node - the node we care about
+//
+void Lowering::ContainCheckMul(GenTreeOp* node)
+{
+}
+
+//------------------------------------------------------------------------
+// ContainCheckDivOrMod: determine which operands of a div/mod should be contained.
+//
+// Arguments:
+// node - the node we care about
+//
+void Lowering::ContainCheckDivOrMod(GenTreeOp* node)
+{
+}
+
+//------------------------------------------------------------------------
+// ContainCheckShiftRotate: Determine whether a shift/rotate op's operands should be contained.
+//
+// Arguments:
+// node - the node we care about
+//
+void Lowering::ContainCheckShiftRotate(GenTreeOp* node)
+{
+}
+
+//------------------------------------------------------------------------
+// ContainCheckStoreLoc: determine whether the source of a STORE_LCL* should be contained.
+//
+// Arguments:
+// storeLoc - pointer to the local store node
+//
+void Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const
+{
+}
+
+//------------------------------------------------------------------------
+// ContainCheckCast: determine whether the source of a CAST node should be contained.
+//
+// Arguments:
+// node - pointer to the node
+//
+void Lowering::ContainCheckCast(GenTreeCast* node)
+{
+ // TODO-WASM-CQ: do containment for casts which can be expressed in terms of memory loads.
+}
+
+//------------------------------------------------------------------------
+// ContainCheckCompare: determine whether the sources of a compare node should be contained.
+//
+// Arguments:
+// cmp - pointer to the compare node
+//
+void Lowering::ContainCheckCompare(GenTreeOp* cmp)
+{
+ // TODO-WASM-CQ: do containment for [i32|i64].eqz.
+}
+
+//------------------------------------------------------------------------
+// ContainCheckSelect: determine whether the source of a select should be contained.
+//
+// Arguments:
+// node - pointer to the node
+//
+void Lowering::ContainCheckSelect(GenTreeOp* node)
+{
+}
diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp
index a2dd58244be7b1..68bd77e9c70971 100644
--- a/src/coreclr/jit/lsra.cpp
+++ b/src/coreclr/jit/lsra.cpp
@@ -1009,9 +1009,6 @@ LinearScan::LinearScan(Compiler* theCompiler)
compiler->rpFrameType = FT_NOT_SET;
compiler->rpMustCreateEBPCalled = false;
- compiler->codeGen->intRegState.rsIsFloat = false;
- compiler->codeGen->floatRegState.rsIsFloat = true;
-
// Block sequencing (the order in which we schedule).
// Note that we don't initialize the bbVisitedSet until we do the first traversal
// This is so that any blocks that are added during the first traversal are accounted for.
diff --git a/src/coreclr/jit/regallocwasm.cpp b/src/coreclr/jit/regallocwasm.cpp
new file mode 100644
index 00000000000000..1404317b1bc32d
--- /dev/null
+++ b/src/coreclr/jit/regallocwasm.cpp
@@ -0,0 +1,27 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include "jitpch.h"
+#ifdef _MSC_VER
+#pragma hdrstop
+#endif
+
+#include "regallocwasm.h"
+
+LinearScanInterface* getLinearScanAllocator(Compiler* compiler)
+{
+ NYI_WASM("getLinearScanAllocator");
+ return nullptr;
+}
+
+bool LinearScan::isRegCandidate(LclVarDsc* varDsc)
+{
+ NYI_WASM("isRegCandidate");
+ return false;
+}
+
+bool LinearScan::isContainableMemoryOp(GenTree* node)
+{
+ NYI_WASM("isContainableMemoryOp");
+ return false;
+}
diff --git a/src/coreclr/jit/regallocwasm.h b/src/coreclr/jit/regallocwasm.h
new file mode 100644
index 00000000000000..51c4bbaa095039
--- /dev/null
+++ b/src/coreclr/jit/regallocwasm.h
@@ -0,0 +1,12 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#pragma once
+
+// TODO-WASM-Factoring: rename the abstractions related to register allocation to make them less LSRA-specific.
+class LinearScan : public LinearScanInterface
+{
+public:
+ bool isRegCandidate(LclVarDsc* varDsc);
+ bool isContainableMemoryOp(GenTree* node);
+};
diff --git a/src/coreclr/jit/register.h b/src/coreclr/jit/register.h
index b735fede117a4d..5c9a03872e9740 100644
--- a/src/coreclr/jit/register.h
+++ b/src/coreclr/jit/register.h
@@ -356,16 +356,14 @@ REGDEF(STK, 8+KBASE, 0x0000, "STK" )
#elif defined(TARGET_ARM)
#include "registerarm.h"
-
#elif defined(TARGET_ARM64)
#include "registerarm64.h"
-
#elif defined(TARGET_LOONGARCH64)
#include "registerloongarch64.h"
-
#elif defined(TARGET_RISCV64)
#include "registerriscv64.h"
-
+#elif defined(TARGET_WASM)
+#include "registerwasm.h"
#else
#error Unsupported or unset target architecture
#endif // target type
diff --git a/src/coreclr/jit/registerwasm.h b/src/coreclr/jit/registerwasm.h
new file mode 100644
index 00000000000000..f32b744af741f7
--- /dev/null
+++ b/src/coreclr/jit/registerwasm.h
@@ -0,0 +1,4 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
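+// WASM has no fixed machine register set; a single pseudo-register ("STK") is defined below so that
+// regNumber/regMaskTP and the code that expands REGDEF entries still compile.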
+REGDEF(STK, 0, 0x0, "STK")
diff --git a/src/coreclr/jit/regset.cpp b/src/coreclr/jit/regset.cpp
index 3d9354b040f48e..81d3c1f777b868 100644
--- a/src/coreclr/jit/regset.cpp
+++ b/src/coreclr/jit/regset.cpp
@@ -35,6 +35,10 @@ const regMaskSmall regMasks[] = {
};
#endif
+// TODO-WASM-Factoring: remove this whole file from !HAS_FIXED_REGISTER_SET compilation.
+// It is being kept for now to avoid ifdefing too much code related to spill temps (which
+// also should not be used with !HAS_FIXED_REGISTER_SET).
+#if HAS_FIXED_REGISTER_SET
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
@@ -240,8 +244,7 @@ void RegSet::SetMaskVars(regMaskTP newMaskVars)
_rsMaskVars = newMaskVars;
}
-
-/*****************************************************************************/
+#endif // HAS_FIXED_REGISTER_SET
RegSet::RegSet(Compiler* compiler, GCInfo& gcInfo)
: m_rsCompiler(compiler)
@@ -307,6 +310,7 @@ RegSet::SpillDsc* RegSet::rsGetSpillInfo(GenTree* tree, regNumber reg, SpillDsc*
return dsc;
}
+#if HAS_FIXED_REGISTER_SET
//------------------------------------------------------------
// rsSpillTree: Spill the tree held in 'reg'.
//
@@ -439,6 +443,7 @@ void RegSet::rsSpillTree(regNumber reg, GenTree* tree, unsigned regIdx /* =0 */)
tree->SetRegSpillFlagByIdx(regFlags, regIdx);
}
}
+#endif // HAS_FIXED_REGISTER_SET
#if defined(TARGET_X86)
/*****************************************************************************
diff --git a/src/coreclr/jit/target.h b/src/coreclr/jit/target.h
index 4d567a5b24b09f..4ea99cf3d58029 100644
--- a/src/coreclr/jit/target.h
+++ b/src/coreclr/jit/target.h
@@ -60,6 +60,8 @@ inline bool compUnixX86Abi()
#define TARGET_READABLE_NAME "LOONGARCH64"
#elif defined(TARGET_RISCV64)
#define TARGET_READABLE_NAME "RISCV64"
+#elif defined(TARGET_WASM32)
+#define TARGET_READABLE_NAME "WASM32"
#else
#error Unsupported or unset target architecture
#endif
@@ -91,6 +93,10 @@ inline bool compUnixX86Abi()
#define REGMASK_BITS 64
#define CSE_CONST_SHARED_LOW_BITS 12
+#elif defined(TARGET_WASM)
+#define REGMASK_BITS 32
+#define CSE_CONST_SHARED_LOW_BITS 12
+
#else
#error Unsupported or unset target architecture
#endif
@@ -106,10 +112,10 @@ inline bool compUnixX86Abi()
// be assigned during register allocation.
// REG_NA - Used to indicate that a register is either not yet assigned or not required.
//
-#if defined(TARGET_ARM) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_ARM) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) || defined(TARGET_WASM)
enum _regNumber_enum : unsigned
{
-#if defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) || defined(TARGET_WASM)
// LA64 and RV64 don't require JITREG_ workaround for Android (see register.h)
#define REGDEF(name, rnum, mask, sname) REG_##name = rnum,
#define REGALIAS(alias, realname) REG_##alias = REG_##realname,
@@ -331,7 +337,7 @@ struct regMaskTP
}
#endif
-#ifndef TARGET_X86
+#if REGMASK_BITS != 32
explicit operator unsigned int() const
{
return (unsigned int)low;
@@ -413,7 +419,7 @@ struct regMaskTP
}
};
-#if defined(TARGET_ARM) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
+#if defined(TARGET_ARM) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) || defined(TARGET_WASM)
#define REGDEF(name, rnum, mask, sname) \
static constexpr regMaskTP RBM_##name = \
@@ -599,8 +605,10 @@ static uint32_t BitScanForward(const regMaskTP& mask)
#include "targetloongarch64.h"
#elif defined(TARGET_RISCV64)
#include "targetriscv64.h"
+#elif defined(TARGET_WASM)
+#include "targetwasm.h"
#else
- #error Unsupported or unset target architecture
+#error Unsupported or unset target architecture
#endif
#ifdef TARGET_XARCH
@@ -622,10 +630,6 @@ static uint32_t BitScanForward(const regMaskTP& mask)
#endif // TARGET_XARCH
-static_assert(REG_FIRST == 0);
-static_assert(REG_INT_FIRST < REG_INT_LAST);
-static_assert(REG_FP_FIRST < REG_FP_LAST);
-
// Opportunistic tail call feature converts non-tail prefixed calls into
// tail calls where possible. It requires fast tail calling mechanism for
// performance. Otherwise, we are better off not converting non-tail prefixed
@@ -675,6 +679,84 @@ const char* getRegNameFloat(regNumber reg, var_types type);
extern void dspRegMask(regMaskTP regMask, size_t minSiz = 0);
#endif
+inline bool isFloatRegType(var_types type)
+{
+ // TODO-Cleanup: delete and use "varTypeUsesFloatReg" directly.
+ return varTypeUsesFloatReg(type);
+}
+
+//-------------------------------------------------------------------------------------------
+// hasFixedRetBuffReg:
+// Returns true if our target architecture uses a fixed return buffer register
+//
+inline bool hasFixedRetBuffReg(CorInfoCallConvExtension callConv)
+{
+#if defined(TARGET_ARM64)
+ // Windows does not use fixed ret buff arg for instance calls, but does otherwise.
+ return !TargetOS::IsWindows || !callConvIsInstanceMethodCallConv(callConv);
+#elif defined(TARGET_AMD64) && defined(SWIFT_SUPPORT)
+ return callConv == CorInfoCallConvExtension::Swift;
+#else
+ return false;
+#endif
+}
+
+//-------------------------------------------------------------------------------------------
+// theFixedRetBuffReg:
+// Returns the regNumber to use for the fixed return buffer
+//
+inline regNumber theFixedRetBuffReg(CorInfoCallConvExtension callConv)
+{
+ assert(hasFixedRetBuffReg(callConv)); // This predicate should be checked before calling this method
+#if defined(TARGET_ARM64)
+ return REG_ARG_RET_BUFF;
+#elif defined(TARGET_AMD64) && defined(SWIFT_SUPPORT)
+ assert(callConv == CorInfoCallConvExtension::Swift);
+ return REG_SWIFT_ARG_RET_BUFF;
+#else
+ return REG_NA;
+#endif
+}
+
+//-------------------------------------------------------------------------------------------
+// theFixedRetBuffMask:
+// Returns the regNumber to use for the fixed return buffer
+//
+inline regMaskTP theFixedRetBuffMask(CorInfoCallConvExtension callConv)
+{
+ assert(hasFixedRetBuffReg(callConv)); // This predicate should be checked before calling this method
+#if defined(TARGET_ARM64)
+ return RBM_ARG_RET_BUFF;
+#elif defined(TARGET_AMD64) && defined(SWIFT_SUPPORT)
+ assert(callConv == CorInfoCallConvExtension::Swift);
+ return RBM_SWIFT_ARG_RET_BUFF;
+#else
+ return 0;
+#endif
+}
+
+//-------------------------------------------------------------------------------------------
+// theFixedRetBuffArgNum:
+// Returns the argNum to use for the fixed return buffer
+//
+inline unsigned theFixedRetBuffArgNum(CorInfoCallConvExtension callConv)
+{
+ assert(hasFixedRetBuffReg(callConv)); // This predicate should be checked before calling this method
+#ifdef TARGET_ARM64
+ return RET_BUFF_ARGNUM;
+#elif defined(TARGET_AMD64) && defined(SWIFT_SUPPORT)
+ assert(callConv == CorInfoCallConvExtension::Swift);
+ return SWIFT_RET_BUFF_ARGNUM;
+#else
+ return BAD_VAR_NUM;
+#endif
+}
+
+#if HAS_FIXED_REGISTER_SET
+static_assert(REG_FIRST == 0);
+static_assert(REG_INT_FIRST < REG_INT_LAST);
+static_assert(REG_FP_FIRST < REG_FP_LAST);
+
#if CPU_HAS_BYTE_REGS
inline bool isByteReg(regNumber reg)
{
@@ -749,73 +831,6 @@ inline bool genIsValidDoubleReg(regNumber reg)
#endif // TARGET_ARM
-//-------------------------------------------------------------------------------------------
-// hasFixedRetBuffReg:
-// Returns true if our target architecture uses a fixed return buffer register
-//
-inline bool hasFixedRetBuffReg(CorInfoCallConvExtension callConv)
-{
-#if defined(TARGET_ARM64)
- // Windows does not use fixed ret buff arg for instance calls, but does otherwise.
- return !TargetOS::IsWindows || !callConvIsInstanceMethodCallConv(callConv);
-#elif defined(TARGET_AMD64) && defined(SWIFT_SUPPORT)
- return callConv == CorInfoCallConvExtension::Swift;
-#else
- return false;
-#endif
-}
-
-//-------------------------------------------------------------------------------------------
-// theFixedRetBuffReg:
-// Returns the regNumber to use for the fixed return buffer
-//
-inline regNumber theFixedRetBuffReg(CorInfoCallConvExtension callConv)
-{
- assert(hasFixedRetBuffReg(callConv)); // This predicate should be checked before calling this method
-#if defined(TARGET_ARM64)
- return REG_ARG_RET_BUFF;
-#elif defined(TARGET_AMD64) && defined(SWIFT_SUPPORT)
- assert(callConv == CorInfoCallConvExtension::Swift);
- return REG_SWIFT_ARG_RET_BUFF;
-#else
- return REG_NA;
-#endif
-}
-
-//-------------------------------------------------------------------------------------------
-// theFixedRetBuffMask:
-// Returns the regNumber to use for the fixed return buffer
-//
-inline regMaskTP theFixedRetBuffMask(CorInfoCallConvExtension callConv)
-{
- assert(hasFixedRetBuffReg(callConv)); // This predicate should be checked before calling this method
-#if defined(TARGET_ARM64)
- return RBM_ARG_RET_BUFF;
-#elif defined(TARGET_AMD64) && defined(SWIFT_SUPPORT)
- assert(callConv == CorInfoCallConvExtension::Swift);
- return RBM_SWIFT_ARG_RET_BUFF;
-#else
- return 0;
-#endif
-}
-
-//-------------------------------------------------------------------------------------------
-// theFixedRetBuffArgNum:
-// Returns the argNum to use for the fixed return buffer
-//
-inline unsigned theFixedRetBuffArgNum(CorInfoCallConvExtension callConv)
-{
- assert(hasFixedRetBuffReg(callConv)); // This predicate should be checked before calling this method
-#ifdef TARGET_ARM64
- return RET_BUFF_ARGNUM;
-#elif defined(TARGET_AMD64) && defined(SWIFT_SUPPORT)
- assert(callConv == CorInfoCallConvExtension::Swift);
- return SWIFT_RET_BUFF_ARGNUM;
-#else
- return BAD_VAR_NUM;
-#endif
-}
-
//-------------------------------------------------------------------------------------------
// fullIntArgRegMask:
// Returns the full mask of all possible integer registers
@@ -1119,16 +1134,6 @@ inline regNumber regNextOfType(regNumber reg, var_types type)
return regReturn;
}
-/*****************************************************************************
- *
- * Type checks
- */
-
-inline bool isFloatRegType(var_types type)
-{
- return varTypeUsesFloatReg(type);
-}
-
// If the WINDOWS_AMD64_ABI is defined make sure that TARGET_AMD64 is also defined.
#if defined(WINDOWS_AMD64_ABI)
#if !defined(TARGET_AMD64)
@@ -1158,6 +1163,7 @@ static_assert((RBM_ALLINT & RBM_FPBASE) == RBM_NONE);
static_assert((RBM_INT_CALLEE_SAVED & RBM_FPBASE) == RBM_NONE);
#endif
/*****************************************************************************/
+#endif // HAS_FIXED_REGISTER_SET
#ifdef TARGET_64BIT
typedef uint64_t target_size_t;
diff --git a/src/coreclr/jit/targetamd64.h b/src/coreclr/jit/targetamd64.h
index d85f041a6bce03..ab2518f117b2d9 100644
--- a/src/coreclr/jit/targetamd64.h
+++ b/src/coreclr/jit/targetamd64.h
@@ -10,7 +10,6 @@
// TODO-AMD64-CQ: Fine tune the following xxBlk threshold values:
#define CPU_LOAD_STORE_ARCH 0
- #define ROUND_FLOAT 0 // Do not round intermed float expression results
#define CPU_HAS_BYTE_REGS 0
#define CPOBJ_NONGC_SLOTS_LIMIT 4 // For CpObj code generation, this is the threshold of the number
@@ -36,7 +35,6 @@
#define FEATURE_MULTIREG_ARGS 1 // Support for passing a single argument in more than one register
#define FEATURE_MULTIREG_RET 1 // Support for returning a single value in more than one register
#define FEATURE_MULTIREG_STRUCT_PROMOTE 1 // True when we want to promote fields of a multireg struct into registers
- #define FEATURE_STRUCT_CLASSIFIER 1 // Uses a classifier function to determine if structs are passed/returned in more than one register
#define MAX_PASS_MULTIREG_BYTES 32 // Maximum size of a struct that could be passed in more than one register (Max is two SIMD16s)
#define MAX_RET_MULTIREG_BYTES 32 // Maximum size of a struct that could be returned in more than one register (Max is two SIMD16s)
#define MAX_ARG_REG_COUNT 2 // Maximum registers used to pass a single argument in multiple registers.
@@ -64,15 +62,16 @@
#define NOGC_WRITE_BARRIERS 0 // We DO-NOT have specialized WriteBarrier JIT Helpers that DO-NOT trash the RBM_CALLEE_TRASH registers
#define USER_ARGS_COME_LAST 1
- #define EMIT_TRACK_STACK_DEPTH 1
#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target
- #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses.
#ifdef UNIX_AMD64_ABI
#define ETW_EBP_FRAMED 1 // if 1 we cannot use EBP as a scratch register and must create EBP based frames for most methods
#else // !UNIX_AMD64_ABI
#define ETW_EBP_FRAMED 0 // if 1 we cannot use EBP as a scratch register and must create EBP based frames for most methods
#endif // !UNIX_AMD64_ABI
+
#define CSE_CONSTS 1 // Enable if we want to CSE constants
+ #define EMIT_TRACK_STACK_DEPTH 1
+ #define EMIT_GENERATE_GCINFO 1 // Track GC ref liveness in codegen and emit and generate GCInfo based on that
#define RBM_LOWFLOAT (RBM_XMM0 | RBM_XMM1 | RBM_XMM2 | RBM_XMM3 | RBM_XMM4 | RBM_XMM5 | RBM_XMM6 | RBM_XMM7 | RBM_XMM8 | RBM_XMM9 | RBM_XMM10 | RBM_XMM11 | RBM_XMM12 | RBM_XMM13 | RBM_XMM14 | RBM_XMM15 )
#define RBM_HIGHFLOAT (RBM_XMM16 | RBM_XMM17 | RBM_XMM18 | RBM_XMM19 | RBM_XMM20 | RBM_XMM21 | RBM_XMM22 | RBM_XMM23 | RBM_XMM24 | RBM_XMM25 | RBM_XMM26 | RBM_XMM27 | RBM_XMM28 | RBM_XMM29 | RBM_XMM30 | RBM_XMM31)
@@ -102,6 +101,7 @@
#define LAST_FP_ARGREG REG_XMM3
#endif // !UNIX_AMD64_ABI
+ #define HAS_FIXED_REGISTER_SET 1 // Has a fixed register set
#define REGNUM_BITS 7 // number of bits in a REG_*
#define REGSIZE_BYTES 8 // number of bytes in one register
#define XMM_REGSIZE_BYTES 16 // XMM register size in bytes
diff --git a/src/coreclr/jit/targetarm.h b/src/coreclr/jit/targetarm.h
index 9cfd1077c72d52..7cf8995c544b83 100644
--- a/src/coreclr/jit/targetarm.h
+++ b/src/coreclr/jit/targetarm.h
@@ -11,7 +11,6 @@
// TODO-ARM-CQ: Check for sdiv/udiv at runtime and generate it if available
#define USE_HELPERS_FOR_INT_DIV 1 // BeagleBoard (ARMv7A) doesn't support SDIV/UDIV
#define CPU_LOAD_STORE_ARCH 1
- #define ROUND_FLOAT 0 // Do not round intermed float expression results
#define CPU_HAS_BYTE_REGS 0
#define FEATURE_FIXED_OUT_ARGS 1 // Preallocate the outgoing arg area in the prolog
@@ -23,7 +22,6 @@
#define FEATURE_MULTIREG_ARGS_OR_RET 1 // Support for passing and/or returning single values in more than one register (including HFA support)
#define FEATURE_MULTIREG_ARGS 1 // Support for passing a single argument in more than one register (including passing HFAs)
#define FEATURE_MULTIREG_RET 1 // Support for returning a single value in more than one register (including HFA returns)
- #define FEATURE_STRUCT_CLASSIFIER 0 // Uses a classifier function to determine is structs are passed/returned in more than one register
#define MAX_PASS_SINGLEREG_BYTES 8 // Maximum size of a struct passed in a single register (double).
#define MAX_PASS_MULTIREG_BYTES 32 // Maximum size of a struct that could be passed in more than one register (Max is an HFA of 4 doubles)
#define MAX_RET_MULTIREG_BYTES 32 // Maximum size of a struct that could be returned in more than one register (Max is an HFA of 4 doubles)
@@ -35,18 +33,21 @@
#define NOGC_WRITE_BARRIERS 0 // We DO-NOT have specialized WriteBarrier JIT Helpers that DO-NOT trash the RBM_CALLEE_TRASH registers
#define USER_ARGS_COME_LAST 1
- #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really
- // need to track stack depth, but this is currently necessary to get GC information reported at call sites.
#define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this target
- #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses.
#define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods
+
#define CSE_CONSTS 1 // Enable if we want to CSE constants
+ #define LOWER_DECOMPOSE_LONGS 1 // Decompose TYP_LONG operations into (typically two) TYP_INT ones
+ #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really
+ // need to track stack depth, but this is currently necessary to get GC information reported at call sites.
+ #define EMIT_GENERATE_GCINFO 1 // Track GC ref liveness in codegen and emit and generate GCInfo based on that
#define REG_FP_FIRST REG_F0
#define REG_FP_LAST REG_F31
#define FIRST_FP_ARGREG REG_F0
#define LAST_FP_ARGREG REG_F15
+ #define HAS_FIXED_REGISTER_SET 1 // Has a fixed register set
#define REGNUM_BITS 6 // number of bits in a REG_*
#define REGSIZE_BYTES 4 // number of bytes in one register
#define MIN_ARG_AREA_FOR_CALL 0 // Minimum required outgoing argument space for a call.
diff --git a/src/coreclr/jit/targetarm64.h b/src/coreclr/jit/targetarm64.h
index c00ac9aeefeb55..10e1bd9e5d5961 100644
--- a/src/coreclr/jit/targetarm64.h
+++ b/src/coreclr/jit/targetarm64.h
@@ -8,7 +8,6 @@
// clang-format off
#define CPU_LOAD_STORE_ARCH 1
- #define ROUND_FLOAT 0 // Do not round intermed float expression results
#define CPU_HAS_BYTE_REGS 0
#ifdef FEATURE_SIMD
@@ -25,7 +24,6 @@
#define FEATURE_MULTIREG_ARGS_OR_RET 1 // Support for passing and/or returning single values in more than one register
#define FEATURE_MULTIREG_ARGS 1 // Support for passing a single argument in more than one register
#define FEATURE_MULTIREG_RET 1 // Support for returning a single value in more than one register
- #define FEATURE_STRUCT_CLASSIFIER 0 // Uses a classifier function to determine is structs are passed/returned in more than one register
#define MAX_PASS_SINGLEREG_BYTES 16 // Maximum size of a struct passed in a single register (16-byte vector).
#define MAX_PASS_MULTIREG_BYTES 64 // Maximum size of a struct that could be passed in more than one register (max is 4 16-byte vectors using an HVA)
#define MAX_RET_MULTIREG_BYTES 64 // Maximum size of a struct that could be returned in more than one register (Max is an HVA of 4 16-byte vectors)
@@ -37,12 +35,13 @@
#define NOGC_WRITE_BARRIERS 1 // We have specialized WriteBarrier JIT Helpers that DO-NOT trash the RBM_CALLEE_TRASH registers
#define USER_ARGS_COME_LAST 1
- #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really
- // need to track stack depth, but this is currently necessary to get GC information reported at call sites.
#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target
- #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses.
#define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods
+
#define CSE_CONSTS 1 // Enable if we want to CSE constants
+ #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really
+ // need to track stack depth, but this is currently necessary to get GC information reported at call sites.
+ #define EMIT_GENERATE_GCINFO 1 // Track GC ref liveness in codegen and emit and generate GCInfo based on that
#define REG_FP_FIRST REG_V0
#define REG_FP_LAST REG_V31
@@ -59,6 +58,7 @@
static_assert(REG_PREDICATE_HIGH_LAST == REG_PREDICATE_LAST);
+ #define HAS_FIXED_REGISTER_SET 1 // Has a fixed register set
#define REGNUM_BITS 7 // number of bits in a REG_*
#define REGSIZE_BYTES 8 // number of bytes in one general purpose register
#define FP_REGSIZE_BYTES 16 // number of bytes in one FP/SIMD register
diff --git a/src/coreclr/jit/targetloongarch64.h b/src/coreclr/jit/targetloongarch64.h
index 7ff82e51840811..39668b3035a488 100644
--- a/src/coreclr/jit/targetloongarch64.h
+++ b/src/coreclr/jit/targetloongarch64.h
@@ -13,7 +13,6 @@
// clang-format off
#define CPU_LOAD_STORE_ARCH 1
#define CPU_HAS_FP_SUPPORT 1
- #define ROUND_FLOAT 0 // Do not round intermed float expression results
#define CPU_HAS_BYTE_REGS 0
#ifdef FEATURE_SIMD
@@ -31,7 +30,6 @@
#define FEATURE_MULTIREG_ARGS_OR_RET 1 // Support for passing and/or returning single values in more than one register
#define FEATURE_MULTIREG_ARGS 1 // Support for passing a single argument in more than one register
#define FEATURE_MULTIREG_RET 1 // Support for returning a single value in more than one register
- #define FEATURE_STRUCT_CLASSIFIER 0 // Uses a classifier function to determine is structs are passed/returned in more than one register
#define MAX_PASS_SINGLEREG_BYTES 8 // Maximum size of a struct passed in a single register (8-byte).
#define MAX_PASS_MULTIREG_BYTES 16 // Maximum size of a struct that could be passed in more than one register
#define MAX_RET_MULTIREG_BYTES 16 // Maximum size of a struct that could be returned in more than one register (Max is an HFA of 2 doubles)
@@ -42,18 +40,20 @@
#define NOGC_WRITE_BARRIERS 1 // We have specialized WriteBarrier JIT Helpers that DO-NOT trash the RBM_CALLEE_TRASH registers
#define USER_ARGS_COME_LAST 1
- #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really
- // need to track stack depth, but this is currently necessary to get GC information reported at call sites.
#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target
- #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses.
#define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods
+
#define CSE_CONSTS 1 // Enable if we want to CSE constants
+ #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really
+ // need to track stack depth, but this is currently necessary to get GC information reported at call sites.
+ #define EMIT_GENERATE_GCINFO 1 // Track GC ref liveness in codegen and emit and generate GCInfo based on that
#define REG_FP_FIRST REG_F0
#define REG_FP_LAST REG_F31
#define FIRST_FP_ARGREG REG_F0
#define LAST_FP_ARGREG REG_F7
+ #define HAS_FIXED_REGISTER_SET 1 // Has a fixed register set
#define REGNUM_BITS 6 // number of bits in a REG_* within registerloongarch64.h
#define REGSIZE_BYTES 8 // number of bytes in one general purpose register
#define FP_REGSIZE_BYTES 8 // number of bytes in one FP register
@@ -94,13 +94,9 @@
REG_F24,REG_F25,REG_F26,REG_F27,REG_F28,REG_F29,REG_F30,REG_F31, \
REG_F1,REG_F0
- #define RBM_CALL_GC_REGS_ORDER RBM_S0,RBM_S1,RBM_S2,RBM_S3,RBM_S4,RBM_S5,RBM_S6,RBM_S7,RBM_S8,RBM_INTRET,RBM_INTRET_1
- #define RBM_CALL_GC_REGS (RBM_S0|RBM_S1|RBM_S2|RBM_S3|RBM_S4|RBM_S5|RBM_S6|RBM_S7|RBM_S8|RBM_INTRET|RBM_INTRET_1)
-
#define CNT_CALLEE_SAVED (10) //s0-s8,fp.
#define CNT_CALLEE_TRASH (17)
#define CNT_CALLEE_ENREG (CNT_CALLEE_SAVED-1)
- #define CNT_CALL_GC_REGS (CNT_CALLEE_SAVED+2)
#define CNT_CALLEE_SAVED_FLOAT (8)
#define CNT_CALLEE_TRASH_FLOAT (24)
diff --git a/src/coreclr/jit/targetriscv64.h b/src/coreclr/jit/targetriscv64.h
index 93915e1380bedb..33e32887208510 100644
--- a/src/coreclr/jit/targetriscv64.h
+++ b/src/coreclr/jit/targetriscv64.h
@@ -9,7 +9,6 @@
// clang-format off
#define CPU_LOAD_STORE_ARCH 1
#define CPU_HAS_FP_SUPPORT 1
- #define ROUND_FLOAT 0 // Do not round intermed float expression results
#define CPU_HAS_BYTE_REGS 0
@@ -26,7 +25,6 @@
#define FEATURE_MULTIREG_ARGS_OR_RET 1 // Support for passing and/or returning single values in more than one register
#define FEATURE_MULTIREG_ARGS 1 // Support for passing a single argument in more than one register
#define FEATURE_MULTIREG_RET 1 // Support for returning a single value in more than one register
- #define FEATURE_STRUCT_CLASSIFIER 0 // Uses a classifier function to determine is structs are passed/returned in more than one register
#define MAX_PASS_SINGLEREG_BYTES 8 // Maximum size of a struct passed in a single register (8-byte vector).
#define MAX_PASS_MULTIREG_BYTES 16 // Maximum size of a struct that could be passed in more than one register
#define MAX_RET_MULTIREG_BYTES 16 // Maximum size of a struct that could be returned in more than one register (Max is an HFA or 2 doubles)
@@ -37,18 +35,20 @@
#define NOGC_WRITE_BARRIERS 1 // We have specialized WriteBarrier JIT Helpers that DO-NOT trash the RBM_CALLEE_TRASH registers
#define USER_ARGS_COME_LAST 1
- #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really
- // need to track stack depth, but this is currently necessary to get GC information reported at call sites.
#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target
- #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter, filter-handler, fault) and directly execute 'finally' clauses.
#define ETW_EBP_FRAMED 1 // if 1 we cannot use REG_FP as a scratch register and must setup the frame pointer for most methods
+
#define CSE_CONSTS 1 // Enable if we want to CSE constants
+ #define EMIT_TRACK_STACK_DEPTH 1 // This is something of a workaround. For both ARM and AMD64, the frame size is fixed, so we don't really
+ // need to track stack depth, but this is currently necessary to get GC information reported at call sites.
+ #define EMIT_GENERATE_GCINFO 1 // Track GC ref liveness in codegen and emit and generate GCInfo based on that
#define REG_FP_FIRST REG_FT0
#define REG_FP_LAST REG_FT11
#define FIRST_FP_ARGREG REG_FA0
#define LAST_FP_ARGREG REG_FA7
+ #define HAS_FIXED_REGISTER_SET 1 // Has a fixed register set
#define REGNUM_BITS 6 // number of bits in a REG_*
#define REGSIZE_BYTES 8 // number of bytes in one general purpose register
#define FP_REGSIZE_BYTES 8 // number of bytes in one FP/SIMD register
@@ -89,13 +89,9 @@
REG_FS6, REG_FS7, REG_FS8, REG_FS9, REG_FS10, REG_FS11, REG_FS2, REG_FS3, REG_FS4, REG_FS5, REG_FS0, REG_FS1, \
REG_FA1, REG_FA0
- #define RBM_CALL_GC_REGS_ORDER RBM_S1,RBM_S2,RBM_S3,RBM_S4,RBM_S5,RBM_S6,RBM_S7,RBM_S8,RBM_S9,RBM_S10,RBM_S11,RBM_INTRET,RBM_INTRET_1
- #define RBM_CALL_GC_REGS (RBM_S1|RBM_S2|RBM_S3|RBM_S4|RBM_S5|RBM_S6|RBM_S7|RBM_S8|RBM_S9|RBM_S10|RBM_S11|RBM_INTRET|RBM_INTRET_1)
-
#define CNT_CALLEE_SAVED (11)
#define CNT_CALLEE_TRASH (15)
#define CNT_CALLEE_ENREG (CNT_CALLEE_SAVED-1)
- #define CNT_CALL_GC_REGS (CNT_CALLEE_SAVED+2)
#define CNT_CALLEE_SAVED_FLOAT (12)
#define CNT_CALLEE_TRASH_FLOAT (20)
diff --git a/src/coreclr/jit/targetwasm.cpp b/src/coreclr/jit/targetwasm.cpp
new file mode 100644
index 00000000000000..3c74f66ce4f94f
--- /dev/null
+++ b/src/coreclr/jit/targetwasm.cpp
@@ -0,0 +1,51 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+#include "jitpch.h"
+#ifdef _MSC_VER
+#pragma hdrstop
+#endif
+
+#ifdef TARGET_WASM32
+#define CPU_NAME "wasm32";
+#else
+#define CPU_NAME "wasm64";
+#endif
+
+const char* Target::g_tgtCPUName = CPU_NAME;
+const Target::ArgOrder Target::g_tgtArgOrder = ARG_ORDER_R2L;
+const Target::ArgOrder Target::g_tgtUnmanagedArgOrder = ARG_ORDER_R2L;
+
+//-----------------------------------------------------------------------------
+// WasmClassifier:
+// Construct a new instance of the Wasm ABI classifier.
+//
+// Parameters:
+// info - Info about the method being classified.
+//
+WasmClassifier::WasmClassifier(const ClassifierInfo& info)
+{
+}
+
+//-----------------------------------------------------------------------------
+// Classify:
+// Classify a parameter for the Wasm ABI.
+//
+// Parameters:
+// comp - Compiler instance
+// type - The type of the parameter
+// structLayout - The layout of the struct. Expected to be non-null if
+// varTypeIsStruct(type) is true.
+// wellKnownParam - Well known type of the parameter (if it may affect its ABI classification)
+//
+// Returns:
+// Classification information for the parameter.
+//
+ABIPassingInformation WasmClassifier::Classify(Compiler* comp,
+ var_types type,
+ ClassLayout* structLayout,
+ WellKnownArg wellKnownParam)
+{
+ NYI_WASM("WasmClassifier::Classify");
+ return {};
+}
diff --git a/src/coreclr/jit/targetwasm.h b/src/coreclr/jit/targetwasm.h
new file mode 100644
index 00000000000000..cdcd497fff5adb
--- /dev/null
+++ b/src/coreclr/jit/targetwasm.h
@@ -0,0 +1,284 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+#pragma once
+
+#if !defined(TARGET_WASM)
+#error The file should not be included for this platform.
+#endif
+
+// clang-format off
+#define CPU_LOAD_STORE_ARCH 1
+#define CPU_HAS_FP_SUPPORT 1
+#define CPU_HAS_BYTE_REGS 0
+
+// Currently we don't pass any arguments on the linear memory stack so FEATURE_FIXED_OUT_ARGS is not needed.
+#define FEATURE_FIXED_OUT_ARGS 0 // Preallocate the outgoing arg area in the prolog
+#define FEATURE_STRUCTPROMOTE 1 // JIT Optimization to promote fields of structs into registers
+#define FEATURE_MULTIREG_STRUCT_PROMOTE 1 // True when we want to promote fields of a multireg struct into registers
+#define FEATURE_FASTTAILCALL 0 // Tail calls made as epilog+jmp
+#define FEATURE_TAILCALL_OPT 0 // opportunistic Tail calls (i.e. without ".tail" prefix) made as fast tail calls.
+#define FEATURE_IMPLICIT_BYREFS 1 // Support for struct parameters passed via pointers to shadow copies
+#define FEATURE_MULTIREG_ARGS_OR_RET 0 // Support for passing and/or returning single values in more than one register
+#define FEATURE_MULTIREG_ARGS 0 // Support for passing a single argument in more than one register
+#define FEATURE_MULTIREG_RET 0 // Support for returning a single value in more than one register
+#define MAX_PASS_SINGLEREG_BYTES 8 // Maximum size of a struct passed in a single register (long/double).
+#define MAX_PASS_MULTIREG_BYTES 0 // Maximum size of a struct that could be passed in more than one register
+#define MAX_RET_MULTIREG_BYTES 0 // Maximum size of a struct that could be returned in more than one register (Max is an HFA or 2 doubles)
+#define MAX_ARG_REG_COUNT 1 // Maximum registers used to pass a single argument in multiple registers.
+#define MAX_RET_REG_COUNT 1 // Maximum registers used to return a value.
+#define MAX_MULTIREG_COUNT 2 // Maximum number of registers defined by a single instruction (including calls).
+ // This is also the maximum number of registers for a MultiReg node.
+
+#define NOGC_WRITE_BARRIERS 0 // No specialized WriteBarrier JIT Helpers
+#define USER_ARGS_COME_LAST 1
+#ifdef TARGET_WASM32
+#define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this target
+#else
+#define TARGET_POINTER_SIZE 8 // equal to sizeof(void*) and the managed pointer size in bytes for this target
+#endif
+#define ETW_EBP_FRAMED 0 // No frame pointer chaining on WASM
+
+// TODO-WASM-CQ: measure if "CSE_CONSTS" is beneficial.
+#define CSE_CONSTS 1 // Enable if we want to CSE constants
+#define EMIT_TRACK_STACK_DEPTH 1 // TODO-WASM: set to 0.
+#define EMIT_GENERATE_GCINFO 0 // Codegen and emit not responsible for GC liveness tracking and GCInfo generation
+
+// Since we don't have a fixed register set on WASM, we set most of the following register defines to 'none'-like values.
+#define REG_FP_FIRST REG_NA
+#define REG_FP_LAST REG_NA
+#define FIRST_FP_ARGREG REG_NA
+#define LAST_FP_ARGREG REG_NA
+
+#define HAS_FIXED_REGISTER_SET 0 // WASM has an unlimited number of locals/registers.
+#define REGNUM_BITS 1 // number of bits in a REG_*
+#define REGSIZE_BYTES TARGET_POINTER_SIZE // number of bytes in one general purpose register
+#define FP_REGSIZE_BYTES 8 // number of bytes in one FP/SIMD register
+#define FPSAVE_REGSIZE_BYTES 8 // number of bytes in one FP/SIMD register that are saved/restored, for callee-saved registers
+
+#define MIN_ARG_AREA_FOR_CALL 0 // Minimum required outgoing argument space for a call.
+
+#define CODE_ALIGN 1 // code alignment requirement
+#define STACK_ALIGN 16 // stack alignment requirement
+
+#define FIRST_INT_CALLEE_SAVED REG_NA
+#define LAST_INT_CALLEE_SAVED REG_NA
+#define RBM_INT_CALLEE_SAVED RBM_NONE
+#define RBM_INT_CALLEE_TRASH RBM_NONE
+#define FIRST_FLT_CALLEE_SAVED REG_NA
+#define LAST_FLT_CALLEE_SAVED REG_NA
+#define RBM_FLT_CALLEE_SAVED RBM_NONE
+#define RBM_FLT_CALLEE_TRASH RBM_NONE
+
+#define RBM_CALLEE_SAVED RBM_NONE
+#define RBM_CALLEE_TRASH RBM_NONE
+
+#define REG_DEFAULT_HELPER_CALL_TARGET REG_NA
+#define RBM_DEFAULT_HELPER_CALL_TARGET RBM_NONE
+
+#define RBM_ALLINT RBM_NONE
+#define RBM_ALLFLOAT RBM_NONE
+#define RBM_ALLDOUBLE RBM_NONE
+
+#define REG_VAR_ORDER
+#define REG_VAR_ORDER_FLT
+
+// The defines below affect CSE heuristics, so we need to give them some 'sensible' values.
+#define CNT_CALLEE_SAVED (8)
+#define CNT_CALLEE_TRASH (8)
+#define CNT_CALLEE_ENREG (CNT_CALLEE_SAVED)
+
+#define CNT_CALLEE_SAVED_FLOAT (10)
+#define CNT_CALLEE_TRASH_FLOAT (10)
+#define CNT_CALLEE_ENREG_FLOAT (CNT_CALLEE_SAVED_FLOAT)
+
+#define CNT_CALLEE_SAVED_MASK (0)
+#define CNT_CALLEE_TRASH_MASK (0)
+#define CNT_CALLEE_ENREG_MASK (CNT_CALLEE_SAVED_MASK)
+
+#define CALLEE_SAVED_REG_MAXSZ (CNT_CALLEE_SAVED * REGSIZE_BYTES)
+#define CALLEE_SAVED_FLOAT_MAXSZ (CNT_CALLEE_SAVED_FLOAT * FPSAVE_REGSIZE_BYTES)
+
+#define REG_TMP_0 REG_NA
+
+// Temporary registers used for the GS cookie check.
+#define REG_GSCOOKIE_TMP_0 REG_NA
+#define REG_GSCOOKIE_TMP_1 REG_NA
+
+// register to hold shift amount
+#define REG_SHIFT REG_NA
+#define RBM_SHIFT RBM_ALLINT
+
+// This is a general scratch register that does not conflict with the argument registers
+#define REG_SCRATCH REG_NA
+
+// This is a general register that can be optionally reserved for other purposes during codegen
+#define REG_OPT_RSVD REG_NA
+#define RBM_OPT_RSVD RBM_NONE
+
+// Where is the exception object on entry to the handler block?
+#define REG_EXCEPTION_OBJECT REG_NA
+#define RBM_EXCEPTION_OBJECT RBM_NONE
+
+#define REG_JUMP_THUNK_PARAM REG_NA
+#define RBM_JUMP_THUNK_PARAM RBM_NONE
+
+#define REG_WRITE_BARRIER_DST REG_NA
+#define RBM_WRITE_BARRIER_DST RBM_NONE
+
+#define REG_WRITE_BARRIER_SRC REG_NA
+#define RBM_WRITE_BARRIER_SRC RBM_NONE
+
+#define REG_WRITE_BARRIER_DST_BYREF REG_NA
+#define RBM_WRITE_BARRIER_DST_BYREF RBM_NONE
+
+#define REG_WRITE_BARRIER_SRC_BYREF REG_NA
+#define RBM_WRITE_BARRIER_SRC_BYREF RBM_NONE
+
+#define RBM_CALLEE_TRASH_NOGC RBM_NONE
+
+// Registers killed by CORINFO_HELP_ASSIGN_REF and CORINFO_HELP_CHECKED_ASSIGN_REF.
+#define RBM_CALLEE_TRASH_WRITEBARRIER RBM_NONE
+
+// Registers no longer containing GC pointers after CORINFO_HELP_ASSIGN_REF and CORINFO_HELP_CHECKED_ASSIGN_REF.
+#define RBM_CALLEE_GCTRASH_WRITEBARRIER RBM_CALLEE_TRASH_NOGC
+
+// Registers killed by CORINFO_HELP_ASSIGN_BYREF.
+#define RBM_CALLEE_TRASH_WRITEBARRIER_BYREF RBM_NONE
+
+// Registers no longer containing GC pointers after CORINFO_HELP_ASSIGN_BYREF.
+#define RBM_CALLEE_GCTRASH_WRITEBARRIER_BYREF RBM_NONE
+
+// GenericPInvokeCalliHelper VASigCookie Parameter
+#define REG_PINVOKE_COOKIE_PARAM REG_NA
+#define RBM_PINVOKE_COOKIE_PARAM RBM_NONE
+
+// GenericPInvokeCalliHelper unmanaged target Parameter
+#define REG_PINVOKE_TARGET_PARAM REG_NA
+#define RBM_PINVOKE_TARGET_PARAM RBM_NONE
+
+// IL stub's secret MethodDesc parameter (JitFlags::JIT_FLAG_PUBLISH_SECRET_PARAM)
+#define REG_SECRET_STUB_PARAM REG_NA
+#define RBM_SECRET_STUB_PARAM RBM_NONE
+
+// R2R indirect call. Use the same registers as VSD
+#define REG_R2R_INDIRECT_PARAM REG_NA
+#define RBM_R2R_INDIRECT_PARAM RBM_NONE
+
+// JMP Indirect call register
+#define REG_INDIRECT_CALL_TARGET_REG REG_NA
+
+// The following defines are useful for iterating a regNumber
+#define REG_FIRST REG_NA
+#define REG_INT_FIRST REG_NA
+#define REG_INT_LAST REG_NA
+#define REG_INT_COUNT (REG_INT_LAST - REG_INT_FIRST + 1)
+#define REG_NEXT(reg) ((regNumber)((unsigned)(reg) + 1))
+#define REG_PREV(reg) ((regNumber)((unsigned)(reg) - 1))
+
+// The following registers are used in emitting Enter/Leave/Tailcall profiler callbacks
+#define REG_PROFILER_ENTER_ARG_FUNC_ID REG_NA
+#define RBM_PROFILER_ENTER_ARG_FUNC_ID RBM_NONE
+#define REG_PROFILER_ENTER_ARG_CALLER_SP REG_NA
+#define RBM_PROFILER_ENTER_ARG_CALLER_SP RBM_NONE
+#define REG_PROFILER_LEAVE_ARG_FUNC_ID REG_PROFILER_ENTER_ARG_FUNC_ID
+#define RBM_PROFILER_LEAVE_ARG_FUNC_ID RBM_PROFILER_ENTER_ARG_FUNC_ID
+#define REG_PROFILER_LEAVE_ARG_CALLER_SP REG_PROFILER_ENTER_ARG_CALLER_SP
+#define RBM_PROFILER_LEAVE_ARG_CALLER_SP RBM_PROFILER_ENTER_ARG_CALLER_SP
+
+// The registers trashed by profiler enter/leave/tailcall hook
+#define RBM_PROFILER_ENTER_TRASH RBM_NONE
+#define RBM_PROFILER_LEAVE_TRASH RBM_PROFILER_ENTER_TRASH
+#define RBM_PROFILER_TAILCALL_TRASH RBM_PROFILER_LEAVE_TRASH
+
+// Which register are int and long values returned in?
+#define REG_INTRET REG_NA
+#define RBM_INTRET RBM_NONE
+#define REG_LNGRET REG_NA
+#define RBM_LNGRET RBM_NONE
+
+#define REG_FLOATRET REG_NA
+#define RBM_FLOATRET RBM_NONE
+#define RBM_DOUBLERET RBM_NONE
+
+// The registers trashed by the CORINFO_HELP_STOP_FOR_GC helper
+#define RBM_STOP_FOR_GC_TRASH RBM_CALLEE_TRASH
+
+// The registers trashed by the CORINFO_HELP_INIT_PINVOKE_FRAME helper.
+#define RBM_INIT_PINVOKE_FRAME_TRASH RBM_CALLEE_TRASH
+
+#define RBM_VALIDATE_INDIRECT_CALL_TRASH RBM_NONE
+#define REG_VALIDATE_INDIRECT_CALL_ADDR REG_NA
+#define REG_DISPATCH_INDIRECT_CALL_ADDR REG_NA
+
+#define REG_ASYNC_CONTINUATION_RET REG_NA
+#define RBM_ASYNC_CONTINUATION_RET RBM_NONE
+
+#define REG_FPBASE REG_NA
+#define RBM_FPBASE RBM_NONE
+#define STR_FPBASE ""
+#define REG_SPBASE REG_NA
+#define RBM_SPBASE RBM_NONE
+#define STR_SPBASE ""
+
+#define FIRST_ARG_STACK_OFFS 0
+
+#define MAX_REG_ARG -1
+#define MAX_FLOAT_REG_ARG -1
+
+#define REG_ARG_FIRST REG_NA
+#define REG_ARG_LAST REG_NA
+#define REG_ARG_FP_FIRST REG_NA
+#define REG_ARG_FP_LAST REG_NA
+#define INIT_ARG_STACK_SLOT 0 // No outgoing reserved stack slots
+
+#define REG_ARG_0 REG_NA
+#define RBM_ARG_0 RBM_NONE
+
+#define RBM_ARG_REGS RBM_NONE
+#define RBM_FLTARG_REGS RBM_NONE
+
+// The number of bytes from the end of the last probed page that must also be probed, to allow for some
+// small SP adjustments without probes. If zero, then the stack pointer can point to the last byte/word
+// on the stack guard page, and must be touched before any further "SUB SP".
+#define STACK_PROBE_BOUNDARY_THRESHOLD_BYTES 0
+
+// clang-format on
+
+// TODO-WASM: implement the following functions in terms of a "locals registry" that would hold information
+// about the registers.
+
+inline bool genIsValidReg(regNumber reg)
+{
+ NYI_WASM("genIsValidReg");
+ return false;
+}
+
+inline bool genIsValidIntReg(regNumber reg)
+{
+ NYI_WASM("genIsValidIntReg");
+ return false;
+}
+
+inline bool genIsValidIntOrFakeReg(regNumber reg)
+{
+ NYI_WASM("genIsValidIntOrFakeReg");
+ return false;
+}
+
+inline bool genIsValidFloatReg(regNumber reg)
+{
+ NYI_WASM("genIsValidFloatReg");
+ return false;
+}
+
+inline bool isValidIntArgReg(regNumber reg, CorInfoCallConvExtension callConv)
+{
+ NYI_WASM("isValidIntArgReg");
+ return false;
+}
+
+inline bool isValidFloatArgReg(regNumber reg)
+{
+ NYI_WASM("isValidFloatArgReg");
+ return false;
+}
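
The genIsValidReg family above is stubbed with NYI_WASM pending the "locals registry" that the TODO-WASM comment mentions; that registry is not part of this change. Purely as a hedged sketch of one possible shape (all names below are hypothetical), such a registry could map register-number-like indices to wasm local slots and their types, letting these predicates become simple lookups:

// Hypothetical sketch of a "locals registry"; not part of this diff.
#include <cassert>
#include <cstdint>
#include <vector>

enum class WasmLocalType : uint8_t
{
    I32,
    I64,
    F32,
    F64,
};

class WasmLocalsRegistry
{
public:
    // Allocates a new wasm local of the given type and returns its index.
    unsigned Allocate(WasmLocalType type)
    {
        m_types.push_back(type);
        return (unsigned)(m_types.size() - 1);
    }

    // A "register" is valid if it names a local we have allocated.
    bool IsValid(unsigned index) const
    {
        return index < m_types.size();
    }

    bool IsFloat(unsigned index) const
    {
        assert(IsValid(index));
        return (m_types[index] == WasmLocalType::F32) || (m_types[index] == WasmLocalType::F64);
    }

private:
    std::vector<WasmLocalType> m_types;
};

int main()
{
    WasmLocalsRegistry registry;
    unsigned intLocal   = registry.Allocate(WasmLocalType::I32);
    unsigned floatLocal = registry.Allocate(WasmLocalType::F64);
    assert(registry.IsValid(intLocal) && !registry.IsFloat(intLocal));
    assert(registry.IsFloat(floatLocal));
    return 0;
}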
diff --git a/src/coreclr/jit/targetx86.h b/src/coreclr/jit/targetx86.h
index 01a85332e50bd3..feab3f561a593a 100644
--- a/src/coreclr/jit/targetx86.h
+++ b/src/coreclr/jit/targetx86.h
@@ -8,7 +8,6 @@
// clang-format off
#define CPU_LOAD_STORE_ARCH 0
- #define ROUND_FLOAT 1 // round intermed float expression results
#define CPU_HAS_BYTE_REGS 1
#define CPOBJ_NONGC_SLOTS_LIMIT 4 // For CpObj code generation, this is the threshold of the number
@@ -47,14 +46,15 @@
// ASM barriers we definitely don't have NOGC barriers).
#endif
#define USER_ARGS_COME_LAST 0
- #define EMIT_TRACK_STACK_DEPTH 1
#define TARGET_POINTER_SIZE 4 // equal to sizeof(void*) and the managed pointer size in bytes for this
// target
- #define FEATURE_EH 1 // To aid platform bring-up, eliminate exceptional EH clauses (catch, filter,
- // filter-handler, fault) and directly execute 'finally' clauses.
#define ETW_EBP_FRAMED 1 // if 1 we cannot use EBP as a scratch register and must create EBP based
// frames for most methods
+
#define CSE_CONSTS 1 // Enable if we want to CSE constants
+ #define LOWER_DECOMPOSE_LONGS 1 // Decompose TYP_LONG operations into (typically two) TYP_INT ones
+ #define EMIT_TRACK_STACK_DEPTH 1
+ #define EMIT_GENERATE_GCINFO 1 // Track GC ref liveness in codegen and the emitter, and generate GCInfo based on it
// The following defines are useful for iterating a regNumber
#define REG_FIRST REG_EAX
@@ -115,8 +115,8 @@
#define YMM_REGSIZE_BYTES 32 // YMM register size in bytes
#define ZMM_REGSIZE_BYTES 64 // ZMM register size in bytes
+ #define HAS_FIXED_REGISTER_SET 1 // Has a fixed register set
#define REGNUM_BITS 6 // number of bits in a REG_*
-
#define REGSIZE_BYTES 4 // number of bytes in one register
#define MIN_ARG_AREA_FOR_CALL 0 // Minimum required outgoing argument space for a call.
diff --git a/src/coreclr/jit/utils.cpp b/src/coreclr/jit/utils.cpp
index 98e11c71ff371b..7fa9182c9d721b 100644
--- a/src/coreclr/jit/utils.cpp
+++ b/src/coreclr/jit/utils.cpp
@@ -283,6 +283,7 @@ const char* getRegNameFloat(regNumber reg, var_types type)
*/
const char* dspRegRange(regMaskTP regMask, size_t& minSiz, const char* sep, regNumber regFirst, regNumber regLast)
{
+#if HAS_FIXED_REGISTER_SET
#ifdef FEATURE_MASKED_HW_INTRINSICS
assert(((regFirst == REG_INT_FIRST) && (regLast == REG_INT_LAST)) ||
((regFirst == REG_FP_FIRST) && (regLast == REG_FP_LAST)) ||
@@ -419,6 +420,7 @@ const char* dspRegRange(regMaskTP regMask, size_t& minSiz, const char* sep, regN
regPrev = regNum;
}
+#endif // HAS_FIXED_REGISTER_SET
return sep;
}
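
With this change, dspRegRange keeps its range-printing body only on targets that define HAS_FIXED_REGISTER_SET to 1 (targetx86.h gains that define above) and collapses to returning the separator on targets that define it to 0 (targetwasm.h). Because the guard uses #if rather than #ifdef, a target header that lacked the define would have the identifier silently evaluate to 0 and lose the body; -Wundef-style warnings catch that. A minimal standalone illustration of the pattern (not JIT code; dspRegRangeSketch is a hypothetical stand-in):

// Standalone preprocessor illustration; not JIT code.
#include <cstdio>

#define HAS_FIXED_REGISTER_SET 0 // targetwasm.h-style definition; flip to 1 for fixed-register targets

const char* dspRegRangeSketch(const char* sep)
{
#if HAS_FIXED_REGISTER_SET
    // On fixed-register targets the real function walks [regFirst, regLast] and prints ranges.
    printf("walking the register range...\n");
#endif // HAS_FIXED_REGISTER_SET
    return sep; // on WASM-like targets the function degenerates to returning the separator unchanged
}

int main()
{
    printf("%s\n", dspRegRangeSketch("/"));
    return 0;
}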
diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp
index 22b433629c26dc..b62a9df8f34c37 100644
--- a/src/coreclr/jit/valuenum.cpp
+++ b/src/coreclr/jit/valuenum.cpp
@@ -46,6 +46,11 @@ struct FloatTraits
unsigned bits = 0xFFC00000u;
#elif defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
unsigned bits = 0x7FC00000u;
+#elif defined(TARGET_WASM)
+ // TODO-WASM: this may prove trickier than it seems since there are two possible "canonical"
+ // NaN values. We may need to introduce a new "unknown" value to be returned here.
+ NYI_WASM("FloatTraits::NaN");
+ unsigned bits = 0;
#else
#error Unsupported or unset target architecture
#endif
@@ -72,6 +77,11 @@ struct DoubleTraits
unsigned long long bits = 0xFFF8000000000000ull;
#elif defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
unsigned long long bits = 0x7FF8000000000000ull;
+#elif defined(TARGET_WASM)
+ // TODO-WASM: this may prove trickier than it seems since there are two possible "canonical"
+ // NaN values. We may need to introduce a new "unknown" value to be returned here.
+ NYI_WASM("DoubleTraits::NaN");
+ unsigned long long bits = 0;
#else
#error Unsupported or unset target architecture
#endif
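
The two "canonical" NaN values the TODO-WASM comments refer to are the quiet-NaN encodings with and without the sign bit set — the same 0x7FC00000/0xFFC00000 and 0x7FF8000000000000/0xFFF8000000000000 patterns the neighboring #elif arms already choose per architecture. A small standalone program that materializes both float candidates:

// Standalone illustration of the two candidate canonical NaN bit patterns; not JIT code.
#include <cstdint>
#include <cstdio>
#include <cstring>

static float floatFromBits(uint32_t bits)
{
    float f;
    memcpy(&f, &bits, sizeof(f)); // bit-exact reinterpretation of the encoding
    return f;
}

int main()
{
    const uint32_t positiveQNaN = 0x7FC00000u; // sign bit clear (ARM/LoongArch/RISC-V arm above)
    const uint32_t negativeQNaN = 0xFFC00000u; // sign bit set (the other architecture arm above)

    printf("0x%08X -> %f\n", positiveQNaN, floatFromBits(positiveQNaN));
    printf("0x%08X -> %f\n", negativeQNaN, floatFromBits(negativeQNaN));
    return 0;
}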
diff --git a/src/coreclr/jit/valuenumfuncs.h b/src/coreclr/jit/valuenumfuncs.h
index f4da699bed46c7..f1a19ec9f55432 100644
--- a/src/coreclr/jit/valuenumfuncs.h
+++ b/src/coreclr/jit/valuenumfuncs.h
@@ -193,17 +193,17 @@ ValueNumFuncDef(HWI_##isa##_##name, ((argCount == -1) ? -1 : (argCount + 1)), ((
#define VNF_HWI_FIRST VNF_HWI_Vector128_Abs
#define VNF_HWI_LAST VNF_HWI_AVX512_XnorMask
-#elif defined (TARGET_ARM64)
+#elif defined(TARGET_ARM64)
#define HARDWARE_INTRINSIC(isa, name, size, argCount, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, category, flag) \
ValueNumFuncDef(HWI_##isa##_##name, ((argCount == -1) ? -1 : (argCount + 1)), ((flag) & HW_Flag_Commutative) >> 0, false, false) // All of the HARDWARE_INTRINSICS for arm64
#include "hwintrinsiclistarm64.h"
#define VNF_HWI_FIRST VNF_HWI_Vector64_Abs
#define VNF_HWI_LAST VNF_HWI_Sve_ReverseElement_Predicates
-#elif defined (TARGET_ARM)
+#elif defined(TARGET_ARM)
// No Hardware Intrinsics on ARM32
-#elif defined (TARGET_LOONGARCH64)
+#elif defined(TARGET_LOONGARCH64)
//TODO-LOONGARCH64-CQ: add LoongArch64's Hardware Intrinsics Instructions if supported.
#elif defined (TARGET_RISCV64)
@@ -212,6 +212,10 @@ ValueNumFuncDef(HWI_##isa##_##name, ((argCount == -1) ? -1 : (argCount + 1)), ((
ValueNumFuncDef(MaxInt, 2, true, false, false)
ValueNumFuncDef(MinInt_UN, 2, true, false, false)
ValueNumFuncDef(MaxInt_UN, 2, true, false, false)
+
+#elif defined(TARGET_WASM)
+// No hardware intrinsics on WASM yet.
+
#else
#error Unsupported platform
#endif
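
valuenumfuncs.h is an X-macro list: each architecture arm expands HARDWARE_INTRINSIC into ValueNumFuncDef entries, and the new TARGET_WASM arm simply contributes none for now. For readers unfamiliar with the technique, here is a minimal standalone sketch of the same pattern with hypothetical names (VN_FUNC_LIST and friends are not from the JIT):

// Minimal standalone sketch of the X-macro technique used by valuenumfuncs.h.
#include <cstdio>

// The list is defined once...
#define VN_FUNC_LIST(def) \
    def(Add)              \
    def(MulHi)            \
    def(MaxInt)

// ...and expanded twice: once to build an enum, once to build a parallel name table.
#define DEFINE_ENUM(name) VNF_##name,
enum VNFunc
{
    VN_FUNC_LIST(DEFINE_ENUM)
    VNF_Count
};
#undef DEFINE_ENUM

#define DEFINE_NAME(name) #name,
static const char* const g_vnFuncNames[] = {VN_FUNC_LIST(DEFINE_NAME)};
#undef DEFINE_NAME

int main()
{
    for (int i = 0; i < VNF_Count; i++)
    {
        printf("%d: %s\n", i, g_vnFuncNames[i]);
    }
    return 0;
}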
diff --git a/src/coreclr/runtime.proj b/src/coreclr/runtime.proj
index d70f30b12ec4f2..2ef2a02a1282a7 100644
--- a/src/coreclr/runtime.proj
+++ b/src/coreclr/runtime.proj
@@ -69,6 +69,7 @@
<_CoreClrBuildArg Condition="'$(ClrHostsSubset)' == 'true'" Include="-component hosts" />
<_CoreClrBuildArg Condition="'$(ClrRuntimeSubset)' == 'true'" Include="-component runtime" />
<_CoreClrBuildArg Condition="'$(ClrJitSubset)' == 'true'" Include="-component jit" />
+ <_CoreClrBuildArg Condition="'$(ClrWasmJitSubset)' == 'true'" Include="-component wasmjit" />
<_CoreClrBuildArg Condition="'$(ClrPalTestsSubset)' == 'true'" Include="-component paltests" />
<_CoreClrBuildArg Condition="'$(ClrAllJitsSubset)' == 'true'" Include="-component alljits" />
<_CoreClrBuildArg Condition="'$(ClrAllJitsCommunitySubset)' == 'true'" Include="-component alljits;-cmakeargs "-DCLR_CMAKE_BUILD_COMMUNITY_ALTJITS=1"" />