[Lldb-commits] [PATCH] D82857: [LLDB] Add per-thread register infos shared pointer in gdb-remote
omjavaid updated this revision to Diff 314325. omjavaid added a comment. Rebased CHANGES SINCE LAST ACTION https://reviews.llvm.org/D82857/new/ https://reviews.llvm.org/D82857 Files: lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.h lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.h lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.cpp lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.h Index: lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.h === --- lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.h +++ lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.h @@ -14,6 +14,8 @@ #include "lldb/Target/Thread.h" #include "lldb/Utility/StructuredData.h" +#include "GDBRemoteRegisterContext.h" + class StringExtractor; namespace lldb_private { @@ -101,6 +103,8 @@ m_queue_serial_number; // Queue info from stop reply/stop info for thread lldb_private::LazyBool m_associated_with_libdispatch_queue; + GDBRemoteDynamicRegisterInfoSP m_reg_info_sp; + bool PrivateSetRegisterValue(uint32_t reg, llvm::ArrayRef data); bool PrivateSetRegisterValue(uint32_t reg, uint64_t regval); Index: lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.cpp === --- lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.cpp +++ lldb/source/Plugins/Process/gdb-remote/ThreadGDBRemote.cpp @@ -42,6 +42,14 @@ Log *log(GetLogIfAnyCategoriesSet(GDBR_LOG_THREAD)); LLDB_LOG(log, "this = {0}, pid = {1}, tid = {2}", this, process.GetID(), GetID()); + // At this point we can clone reg_info for architectures supporting + // run-time update to register sizes and offsets.. 
+ auto &gdb_process = static_cast(process); + if (!gdb_process.m_register_info_sp->IsReconfigurable()) +m_reg_info_sp = gdb_process.m_register_info_sp; + else +m_reg_info_sp = std::make_shared( +*gdb_process.m_register_info_sp); } ThreadGDBRemote::~ThreadGDBRemote() { @@ -307,8 +315,8 @@ !pSupported || gdb_process->m_use_g_packet_for_reading; bool write_all_registers_at_once = !pSupported; reg_ctx_sp = std::make_shared( - *this, concrete_frame_idx, gdb_process->m_register_info, - read_all_registers_at_once, write_all_registers_at_once); + *this, concrete_frame_idx, m_reg_info_sp, read_all_registers_at_once, + write_all_registers_at_once); } } else { reg_ctx_sp = GetUnwinder().CreateRegisterContextForFrame(frame); Index: lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.h === --- lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.h +++ lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.h @@ -254,7 +254,7 @@ // the last stop // packet variable std::recursive_mutex m_last_stop_packet_mutex; - GDBRemoteDynamicRegisterInfo m_register_info; + GDBRemoteDynamicRegisterInfoSP m_register_info_sp; Broadcaster m_async_broadcaster; lldb::ListenerSP m_async_listener_sp; HostThread m_async_thread; Index: lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp === --- lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp +++ lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp @@ -249,7 +249,7 @@ ListenerSP listener_sp) : Process(target_sp, listener_sp), m_debugserver_pid(LLDB_INVALID_PROCESS_ID), m_last_stop_packet_mutex(), - m_register_info(), + m_register_info_sp(nullptr), m_async_broadcaster(nullptr, "lldb.process.gdb-remote.async-broadcaster"), m_async_listener_sp( Listener::MakeListener("lldb.process.gdb-remote.async-listener")), @@ -368,8 +368,8 @@ m_breakpoint_pc_offset = breakpoint_pc_int_value->GetValue(); } - if (m_register_info.SetRegisterInfo(*target_definition_sp, - GetTarget().GetArchitecture()) > 0) { + if 
(m_register_info_sp->SetRegisterInfo( + *target_definition_sp, GetTarget().GetArchitecture()) > 0) { return true; } } @@ -396,10 +396,10 @@ } void ProcessGDBRemote::BuildDynamicRegisterInfo(bool force) { - if (!force && m_register_info.GetNumRegisters() > 0) + if (!force && m_register_info_sp) return; - m_register_info.Clear(); + m_register_info_sp = std::make_shared(); // Check if qHostInfo specified a spec
[Lldb-commits] [PATCH] D94008: [LLDB] DynamicRegisterInfo calculate offsets in separate function
omjavaid created this revision. omjavaid added a reviewer: labath. omjavaid requested review of this revision. This patch pull offset calculation logic out of DynamicRegisterInfo::Finalize into a separate function. We are going to call this function whenever we update SVE register sizes. https://reviews.llvm.org/D94008 Files: lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.h Index: lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.h === --- lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.h +++ lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.h @@ -42,6 +42,8 @@ void Finalize(const lldb_private::ArchSpec &arch); + void ConfigureOffsets(); + size_t GetNumRegisters() const; size_t GetNumRegisterSets() const; Index: lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp === --- lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp +++ lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp @@ -442,28 +442,6 @@ m_sets[set].registers = m_set_reg_nums[set].data(); } - // We are going to create a map between remote (eRegisterKindProcessPlugin) - // and local (eRegisterKindLLDB) register numbers. This map will give us - // remote register numbers in increasing order for offset calculation. - std::map remote_to_local_regnum_map; - for (const auto ® : m_regs) -remote_to_local_regnum_map[reg.kinds[eRegisterKindProcessPlugin]] = -reg.kinds[eRegisterKindLLDB]; - - // At this stage we manually calculate g/G packet offsets of all primary - // registers, only if target XML or qRegisterInfo packet did not send - // an offset explicitly. 
- uint32_t reg_offset = 0; - for (auto const ®num_pair : remote_to_local_regnum_map) { -if (m_regs[regnum_pair.second].byte_offset == LLDB_INVALID_INDEX32 && -m_regs[regnum_pair.second].value_regs == nullptr) { - m_regs[regnum_pair.second].byte_offset = reg_offset; - - reg_offset = m_regs[regnum_pair.second].byte_offset + - m_regs[regnum_pair.second].byte_size; -} - } - // sort and unique all value registers and make sure each is terminated with // LLDB_INVALID_REGNUM @@ -485,24 +463,10 @@ // Now update all value_regs with each register info as needed const size_t num_regs = m_regs.size(); for (size_t i = 0; i < num_regs; ++i) { -if (m_value_regs_map.find(i) != m_value_regs_map.end()) { +if (m_value_regs_map.find(i) != m_value_regs_map.end()) m_regs[i].value_regs = m_value_regs_map[i].data(); - // Assign a valid offset to all pseudo registers if not assigned by stub. - // Pseudo registers with value_regs list populated will share same offset - // as that of their corresponding primary register in value_regs list. - if (m_regs[i].byte_offset == LLDB_INVALID_INDEX32) { -uint32_t value_regnum = m_regs[i].value_regs[0]; -if (value_regnum != LLDB_INVALID_INDEX32) - m_regs[i].byte_offset = - GetRegisterInfoAtIndex(remote_to_local_regnum_map[value_regnum]) - ->byte_offset; - } -} else +else m_regs[i].value_regs = nullptr; - -reg_offset = m_regs[i].byte_offset + m_regs[i].byte_size; -if (m_reg_data_byte_size < reg_offset) - m_reg_data_byte_size = reg_offset; } // Expand all invalidation dependencies @@ -648,6 +612,55 @@ break; } } + + // At this stage call ConfigureOffsets to calculate register offsets for + // targets supporting dynamic offset calculation. It also calculates + // total byte size of register data. + ConfigureOffsets(); +} + +void DynamicRegisterInfo::ConfigureOffsets() { + // We are going to create a map between remote (eRegisterKindProcessPlugin) + // and local (eRegisterKindLLDB) register numbers. 
This map will give us + // remote register numbers in increasing order for offset calculation. + std::map remote_to_local_regnum_map; + for (const auto ® : m_regs) +remote_to_local_regnum_map[reg.kinds[eRegisterKindProcessPlugin]] = +reg.kinds[eRegisterKindLLDB]; + + // At this stage we manually calculate g/G packet offsets of all primary + // registers, only if target XML or qRegisterInfo packet did not send + // an offset explicitly. + uint32_t reg_offset = 0; + for (auto const ®num_pair : remote_to_local_regnum_map) { +if (m_regs[regnum_pair.second].byte_offset == LLDB_INVALID_INDEX32 && +m_regs[regnum_pair.second].value_regs == nullptr) { + m_regs[regnum_pair.second].byte_offset = reg_offset; + + reg_offset = m_regs[regnum_pair.second].byte_offset + + m_regs[regnum_pair.second].byte_size; +} + } + + // Now update all value_regs with each register info as needed + for (auto ® : m_regs) { +if (reg.value_regs != nullptr)
[Lldb-commits] [PATCH] D82863: [LLDB] Add support to resize SVE registers at run-time
omjavaid updated this revision to Diff 314327. omjavaid added a comment. @labath I have incorporated your suggestions in this update. Invalidate all registers as SVE size update is not a high frequency occurrence. Also using same logic for offset calculation as we did DynamicRegisterInfo::Finalize. Any further changes needed or this is good for commit? CHANGES SINCE LAST ACTION https://reviews.llvm.org/D82863/new/ https://reviews.llvm.org/D82863 Files: lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.h lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp Index: lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp === --- lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp +++ lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp @@ -1763,6 +1763,19 @@ gdb_thread->PrivateSetRegisterValue(pair.first, buffer_sp->GetData()); } + // AArch64 SVE specific code below calls AArch64SVEReconfigure to update + // SVE register sizes and offsets if value of VG register has changed + // since last stop. + const ArchSpec &arch = GetTarget().GetArchitecture(); + if (arch.IsValid() && arch.GetTriple().isAArch64()) { +GDBRemoteRegisterContext *reg_ctx_sp = +static_cast( +gdb_thread->GetRegisterContext().get()); + +if (reg_ctx_sp) + reg_ctx_sp->AArch64SVEReconfigure(); + } + thread_sp->SetName(thread_name.empty() ? 
nullptr : thread_name.c_str()); gdb_thread->SetThreadDispatchQAddr(thread_dispatch_qaddr); Index: lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h === --- lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h +++ lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h @@ -39,6 +39,7 @@ ~GDBRemoteDynamicRegisterInfo() override = default; void HardcodeARMRegisters(bool from_scratch); + bool UpdateARM64SVERegistersInfos(uint64_t vg); }; class GDBRemoteRegisterContext : public RegisterContext { @@ -77,6 +78,8 @@ uint32_t ConvertRegisterKindToRegisterNumber(lldb::RegisterKind kind, uint32_t num) override; + bool AArch64SVEReconfigure(); + protected: friend class ThreadGDBRemote; Index: lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp === --- lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp +++ lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp @@ -214,8 +214,8 @@ for (int i = 0; i < regcount; i++) { struct RegisterInfo *reginfo = m_reg_info_sp->GetRegisterInfoAtIndex(i); -if (reginfo->byte_offset + reginfo->byte_size - <= buffer_sp->GetByteSize()) { +if (reginfo->byte_offset + reginfo->byte_size <= +buffer_sp->GetByteSize()) { m_reg_valid[i] = true; } else { m_reg_valid[i] = false; @@ -344,6 +344,15 @@ if (dst == nullptr) return false; + // Code below is specific to AArch64 target in SVE state + // If vector granule (vg) register is being written then thread's + // register context reconfiguration is triggered on success. 
+ bool do_reconfigure_arm64_sve = false; + const ArchSpec &arch = process->GetTarget().GetArchitecture(); + if (arch.IsValid() && arch.GetTriple().isAArch64()) +if (strcmp(reg_info->name, "vg") == 0) + do_reconfigure_arm64_sve = true; + if (data.CopyByteOrderedData(data_offset,// src offset reg_info->byte_size,// src length dst,// dst @@ -363,6 +372,11 @@ { SetAllRegisterValid(false); + + if (do_reconfigure_arm64_sve && + GetPrimordialRegister(reg_info, gdb_comm)) +AArch64SVEReconfigure(); + return true; } } else { @@ -391,6 +405,10 @@ } else { // This is an actual register, write it success = SetPrimordialRegister(reg_info, gdb_comm); + + if (success && do_reconfigure_arm64_sve && + GetPrimordialRegister(reg_info, gdb_comm)) +AArch64SVEReconfigure(); } // Check if writing this register will invalidate any other register @@ -656,9 +674,8 @@ if (m_thread.GetProcess().get()) { const ArchSpec &arch = m_thread.GetProcess()->GetTarget().GetArchitecture
[Lldb-commits] [PATCH] D82866: [LLDB] Test SVE dynamic resize with multiple threads
omjavaid updated this revision to Diff 314328. omjavaid added a comment. Added separate thread functions for both threads. CHANGES SINCE LAST ACTION https://reviews.llvm.org/D82866/new/ https://reviews.llvm.org/D82866 Files: lldb/test/API/commands/register/register/aarch64_sve_registers/rw_access_dynamic_resize/Makefile lldb/test/API/commands/register/register/aarch64_sve_registers/rw_access_dynamic_resize/TestSVEThreadedDynamic.py lldb/test/API/commands/register/register/aarch64_sve_registers/rw_access_dynamic_resize/main.c Index: lldb/test/API/commands/register/register/aarch64_sve_registers/rw_access_dynamic_resize/main.c === --- /dev/null +++ lldb/test/API/commands/register/register/aarch64_sve_registers/rw_access_dynamic_resize/main.c @@ -0,0 +1,96 @@ +#include +#include + +static inline void write_sve_registers() { + asm volatile("setffr\n\t"); + asm volatile("ptrue p0.b\n\t"); + asm volatile("ptrue p1.h\n\t"); + asm volatile("ptrue p2.s\n\t"); + asm volatile("ptrue p3.d\n\t"); + asm volatile("pfalse p4.b\n\t"); + asm volatile("ptrue p5.b\n\t"); + asm volatile("ptrue p6.h\n\t"); + asm volatile("ptrue p7.s\n\t"); + asm volatile("ptrue p8.d\n\t"); + asm volatile("pfalse p9.b\n\t"); + asm volatile("ptrue p10.b\n\t"); + asm volatile("ptrue p11.h\n\t"); + asm volatile("ptrue p12.s\n\t"); + asm volatile("ptrue p13.d\n\t"); + asm volatile("pfalse p14.b\n\t"); + asm volatile("ptrue p15.b\n\t"); + + asm volatile("cpy z0.b, p0/z, #1\n\t"); + asm volatile("cpy z1.b, p5/z, #2\n\t"); + asm volatile("cpy z2.b, p10/z, #3\n\t"); + asm volatile("cpy z3.b, p15/z, #4\n\t"); + asm volatile("cpy z4.b, p0/z, #5\n\t"); + asm volatile("cpy z5.b, p5/z, #6\n\t"); + asm volatile("cpy z6.b, p10/z, #7\n\t"); + asm volatile("cpy z7.b, p15/z, #8\n\t"); + asm volatile("cpy z8.b, p0/z, #9\n\t"); + asm volatile("cpy z9.b, p5/z, #10\n\t"); + asm volatile("cpy z10.b, p10/z, #11\n\t"); + asm volatile("cpy z11.b, p15/z, #12\n\t"); + asm volatile("cpy z12.b, p0/z, #13\n\t"); + asm volatile("cpy 
z13.b, p5/z, #14\n\t"); + asm volatile("cpy z14.b, p10/z, #15\n\t"); + asm volatile("cpy z15.b, p15/z, #16\n\t"); + asm volatile("cpy z16.b, p0/z, #17\n\t"); + asm volatile("cpy z17.b, p5/z, #18\n\t"); + asm volatile("cpy z18.b, p10/z, #19\n\t"); + asm volatile("cpy z19.b, p15/z, #20\n\t"); + asm volatile("cpy z20.b, p0/z, #21\n\t"); + asm volatile("cpy z21.b, p5/z, #22\n\t"); + asm volatile("cpy z22.b, p10/z, #23\n\t"); + asm volatile("cpy z23.b, p15/z, #24\n\t"); + asm volatile("cpy z24.b, p0/z, #25\n\t"); + asm volatile("cpy z25.b, p5/z, #26\n\t"); + asm volatile("cpy z26.b, p10/z, #27\n\t"); + asm volatile("cpy z27.b, p15/z, #28\n\t"); + asm volatile("cpy z28.b, p0/z, #29\n\t"); + asm volatile("cpy z29.b, p5/z, #30\n\t"); + asm volatile("cpy z30.b, p10/z, #31\n\t"); + asm volatile("cpy z31.b, p15/z, #32\n\t"); +} + +void *threadX_func(void *x_arg) { + prctl(PR_SVE_SET_VL, 8 * 4); + write_sve_registers(); + write_sve_registers(); // Thread X breakpoint 1 + return NULL; // Thread X breakpoint 2 +} + +void *threadY_func(void *y_arg) { + prctl(PR_SVE_SET_VL, 8 * 2); + write_sve_registers(); + write_sve_registers(); // Thread Y breakpoint 1 + return NULL; // Thread Y breakpoint 2 +} + +int main() { + /* this variable is our reference to the second thread */ + pthread_t x_thread, y_thread; + + /* Set vector length to 8 and write SVE registers values */ + prctl(PR_SVE_SET_VL, 8 * 8); + write_sve_registers(); + + /* create a second thread which executes with argument x */ + if (pthread_create(&x_thread, NULL, threadX_func, 0)) // Break in main thread +return 1; + + /* create a second thread which executes with argument y */ + if (pthread_create(&y_thread, NULL, threadY_func, 0)) +return 1; + + /* wait for the first thread to finish */ + if (pthread_join(x_thread, NULL)) +return 2; + + /* wait for the second thread to finish */ + if (pthread_join(y_thread, NULL)) +return 2; + + return 0; +} Index: 
lldb/test/API/commands/register/register/aarch64_sve_registers/rw_access_dynamic_resize/TestSVEThreadedDynamic.py === --- /dev/null +++ lldb/test/API/commands/register/register/aarch64_sve_registers/rw_access_dynamic_resize/TestSVEThreadedDynamic.py @@ -0,0 +1,138 @@ +""" +Test the AArch64 SVE registers dynamic resize with multiple threads. +""" + +import lldb +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class RegisterCommandsTestCase(TestBase): + +def check_sve_registers(self, vg_test_value): +z_reg_size = vg_test_value * 8 +p_reg_size = int(z_reg_size / 8) + +p_value_bytes = ['0xff', '0x55', '0x11', '0x01', '0x00'] + +for i in range(32): +s_reg_value = 's
[Lldb-commits] [PATCH] D93481: [lldb/Lua] add support for multiline scripted breakpoints
tammela added a comment. ping Repository: rG LLVM Github Monorepo CHANGES SINCE LAST ACTION https://reviews.llvm.org/D93481/new/ https://reviews.llvm.org/D93481 ___ lldb-commits mailing list lldb-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits
[Lldb-commits] [PATCH] D93649: [lldb/Lua] add support for Lua function breakpoint
tammela added a comment. ping Repository: rG LLVM Github Monorepo CHANGES SINCE LAST ACTION https://reviews.llvm.org/D93649/new/ https://reviews.llvm.org/D93649 ___ lldb-commits mailing list lldb-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits
[Lldb-commits] [PATCH] D93874: [process] fix exec support on Linux
wallace added inline comments. Comment at: lldb/test/API/functionalities/exec/TestExec.py:91-93 +# Single step to create a thread plan. We have to make sure that after exec +# we clear all existing thread plans. +thread.StepInstruction(False) labath wrote: > A separate test case would be better, as (even with the comment) a random > future modification of the test could negate this. Sure! I'll wait for @jingham to chime in first Repository: rG LLVM Github Monorepo CHANGES SINCE LAST ACTION https://reviews.llvm.org/D93874/new/ https://reviews.llvm.org/D93874 ___ lldb-commits mailing list lldb-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits
[Lldb-commits] [PATCH] D93951: [vscode] Improve runInTerminal and support linux
clayborg requested changes to this revision. clayborg added a comment. This revision now requires changes to proceed. Great stuff! See inline comments. A few new tests need to be added: - test running lldb-vscode with invalid "--launch-target" executable path so things fail to exec and verify we get as nice of an error message as possible - test running lldb-vscode with valid "--launch-target" and no "--pid-file" option to make sure lldb-vscode fails gracefully with an error and appropriate message - test running lldb-vscode with valid "--launch-target" and an invalid "--pid-file" path with an invalid directory - test running lldb-vscode with valid "--launch-target" and an invalid "--pid-file" path that points somewhere we don't have permissions All these tests will ensure we don't end up with a deadlock situation when trying to launch in terminal. Comment at: lldb/tools/lldb-vscode/lldb-vscode.cpp:1454 + // adaptor. +#if !defined(_WIN32) + if (int err = mkfifo(pid_file.c_str(), 0666)) { If windows is not supported, should we avoid calling this function (CreateRunInTerminalPidFile) in the first place? Comment at: lldb/tools/lldb-vscode/lldb-vscode.cpp:1472 + std::ifstream pid_file_reader(pid_file, std::ifstream::in); + pid_file_reader >> pid; + return pid; What happens if the pid never gets written to the stream? Deadlock forever? Also, what happens if an error happens during the read? Can we check if the stream has an error after readind "pid" just in case? If there is error, (like what if the other process wrote "hello" to the stream instead of "123"), what ends up in "pid"? Is it set to zero? Or just left as a random number? If it is left alone, we should initialize "pid" with the LLDB_INVALID_PROCESS_ID on line 1470 Comment at: lldb/tools/lldb-vscode/lldb-vscode.cpp:1506 if (error.Success()) { -// Wait for the attach stop event to happen or for a timeout. 
-g_vsc.waiting_for_run_in_terminal = true; -static std::mutex mutex; -std::unique_lock locker(mutex); -g_vsc.request_in_terminal_cv.wait_for(locker, std::chrono::seconds(10)); +lldb::pid_t pid = GetRunInTerminalDebuggeePid(pid_file); Check for invalid LLDB_INVALID_PROCESS_ID here in case something went wrong when trying to read the "pid" and return an error Comment at: lldb/tools/lldb-vscode/lldb-vscode.cpp:1517-1518 + NotifyRunInTerminalDebuggeeWasAttached(pid_file); + // We resume the process to let stopOnEntry decide whether to stop the + // process or not. + g_vsc.target.GetProcess().Continue(); Do we need more to this comment? Aren't we continuing the lldb-vscode that will exec into our program here? If we are, we should clearly explain what is happening here. Comment at: lldb/tools/lldb-vscode/lldb-vscode.cpp:3003 int main(int argc, char *argv[]) { + g_vsc.debug_adaptor_path = argv[0]; + This path can be relative, so we might need to resolve it using a llvm path call? I can't remember if execv() can use a relative path or not. Comment at: lldb/tools/lldb-vscode/lldb-vscode.cpp:3011 +#if !defined(_WIN32) + if (auto *target_arg = input_args.getLastArg(OPT_launch_target)) { +std::string pid_file = input_args.getLastArg(OPT_pid_file)->getValue(); We need a nice beefy comment here as to what is going on explaining everything. Comment at: lldb/tools/lldb-vscode/lldb-vscode.cpp:3012 + if (auto *target_arg = input_args.getLastArg(OPT_launch_target)) { +std::string pid_file = input_args.getLastArg(OPT_pid_file)->getValue(); +std::ofstream pid_file_writer(pid_file, std::ofstream::out); Does the llvm argument parser already guarantee that we have a valid value in pid_file? 
If not, we need to make sure "pid_file" is not empty here and return an error if it wasn't specified Comment at: lldb/tools/lldb-vscode/lldb-vscode.cpp:3013 +std::string pid_file = input_args.getLastArg(OPT_pid_file)->getValue(); +std::ofstream pid_file_writer(pid_file, std::ofstream::out); +pid_file_writer << getpid() << std::endl; error check that the creation of the ofstream succeeded and return an error if it fails and exit with EXIT_FAILURE Comment at: lldb/tools/lldb-vscode/lldb-vscode.cpp:3022 + if (RunInTerminalDebugeeWasAttached(pid_file)) { +const char *target = target_arg->getValue(); +execv(target, argv + target_arg->getIndex() + 2); Should we verify that "target" is a valid file that exists prior to just calling execv? We might be able to return a better error message. Or we can check if the file exists after the "execv(...)" call and return a better error message than "Couldn't launch target" like "Failed to launch '/path/that
[Lldb-commits] [PATCH] D93874: [process] fix exec support on Linux
clayborg added a comment. Walter and I identified this at work, definitely want Jim to chime in on this. Comment at: lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp:2614-2616 m_thread_list_real.Clear(); m_thread_list.Clear(); +m_thread_plans.Clear(); These things seem like they could go into a new function on the lldb_private::Process base class like: ``` void Process::ProcessDidExec() { m_thread_list_real.Clear(); m_thread_list.Clear(); m_thread_plans.Clear(); } ``` I know we are encourage people to use the lldb-server debugging route, but having common code in ProcessGDBRemote that each Process subclass will need to figure out how to do correctly seems like an easy fix. Comment at: lldb/test/API/functionalities/exec/TestExec.py:96 # Run and we should stop due to exec process.Continue() Do we need this continue if we did the step above? How does this test still work if the step was supposed to step over the exec? Repository: rG LLVM Github Monorepo CHANGES SINCE LAST ACTION https://reviews.llvm.org/D93874/new/ https://reviews.llvm.org/D93874 ___ lldb-commits mailing list lldb-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits
[Lldb-commits] [PATCH] D94033: [lldb-vscode] improve modules request
wallace created this revision. wallace added reviewers: clayborg, kusmour, aadsm. wallace requested review of this revision. Herald added a project: LLDB. Herald added a subscriber: lldb-commits. lldb-vscode was communicating the list of modules to the IDE with events, which in practice ended up having some drawbacks - when debugging large targets, the number of these events was easily 10k, which polluted the messages being transmitted, which caused the following: a harder time debugging the messages, a lag after terminating the process because of these messages being processed (this could easily take several seconds). The latter was especially bad, as users were complaining about it even when they didn't check the modules view. - these events were rarely used, as users only check the modules view when something is wrong and they try to debug things. After getting some feedback from users, we realized that it's better to not use events but make this simply a request that is triggered by users whenever they need it. This diff achieves that and does some small clean up in the existing code. 
Repository: rG LLVM Github Monorepo https://reviews.llvm.org/D94033 Files: lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/vscode.py lldb/test/API/tools/lldb-vscode/module/TestVSCode_module.py lldb/tools/lldb-vscode/VSCode.cpp lldb/tools/lldb-vscode/lldb-vscode.cpp Index: lldb/tools/lldb-vscode/lldb-vscode.cpp === --- lldb/tools/lldb-vscode/lldb-vscode.cpp +++ lldb/tools/lldb-vscode/lldb-vscode.cpp @@ -440,30 +440,6 @@ g_vsc.SendJSON(llvm::json::Value(std::move(bp_event))); } } - } else if (lldb::SBTarget::EventIsTargetEvent(event)) { -if (event_mask & lldb::SBTarget::eBroadcastBitModulesLoaded || -event_mask & lldb::SBTarget::eBroadcastBitModulesUnloaded || -event_mask & lldb::SBTarget::eBroadcastBitSymbolsLoaded) { - int num_modules = lldb::SBTarget::GetNumModulesFromEvent(event); - for (int i = 0; i < num_modules; i++) { -auto module = lldb::SBTarget::GetModuleAtIndexFromEvent(i, event); -auto module_event = CreateEventObject("module"); -llvm::json::Value module_value = CreateModule(module); -llvm::json::Object body; -if (event_mask & lldb::SBTarget::eBroadcastBitModulesLoaded) { - body.try_emplace("reason", "new"); -} else if (event_mask & - lldb::SBTarget::eBroadcastBitModulesUnloaded) { - body.try_emplace("reason", "removed"); -} else if (event_mask & - lldb::SBTarget::eBroadcastBitSymbolsLoaded) { - body.try_emplace("reason", "changed"); -} -body.try_emplace("module", module_value); -module_event.try_emplace("body", std::move(body)); -g_vsc.SendJSON(llvm::json::Value(std::move(module_event))); - } -} } else if (event.BroadcasterMatchesRef(g_vsc.broadcaster)) { if (event_mask & eBroadcastBitStopEventThread) { done = true; @@ -1196,26 +1172,26 @@ g_vsc.SendJSON(llvm::json::Value(std::move(response))); } -// "getCompileUnitsRequest": { +// "compileUnitsRequest": { // "allOf": [ { "$ref": "#/definitions/Request" }, { // "type": "object", // "description": "Compile Unit request; value of command field is -// 'getCompileUnits'.", +// 'compileUnits'.", // 
"properties": { // "command": { // "type": "string", -// "enum": [ "getCompileUnits" ] +// "enum": [ "compileUnits" ] // }, // "arguments": { -// "$ref": "#/definitions/getCompileUnitRequestArguments" +// "$ref": "#/definitions/compileUnitRequestArguments" // } // }, // "required": [ "command", "arguments" ] // }] // }, -// "getCompileUnitsRequestArguments": { +// "compileUnitsRequestArguments": { // "type": "object", -// "description": "Arguments for 'getCompileUnits' request.", +// "description": "Arguments for 'compileUnits' request.", // "properties": { // "moduleId": { // "type": "string", @@ -1224,23 +1200,21 @@ // }, // "required": [ "moduleId" ] // }, -// "getCompileUnitsResponse": { +// "compileUnitsResponse": { // "allOf": [ { "$ref": "#/definitions/Response" }, { // "type": "object", -// "description": "Response to 'getCompileUnits' request.", +// "description": "Response to 'compileUnits' request.", // "properties": { // "body": { -// "description": "Response to 'getCompileUnits' request. Array of +// "description": "Response to 'compileUnits' request. Array of // paths of compile units." // } // } // }] // } - -void request_getCompileUnits(const llvm::json::Object &re
[Lldb-commits] [PATCH] D93895: Implement vAttachWait in lldb-server
clayborg added a comment. Patch looks good, just a question of what timeout to use by default and possibly adding an option to lldb-server in case users want faster or slower polling. Comment at: lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp:344 + // TODO: Make the polling interval configurable. + std::chrono::milliseconds polling_interval = std::chrono::seconds(1); + We probably want this to be quite a bit shorter than 1 second. Why? Because if you don't find the process right away, 1 second is a long time for the system to wait and your program can execute quite a few instructions in 1 second. By the time you attach, you might have missed your breakpoint at main. Nothing will guarantee hitting a breakpoint, but I would suggest making a smaller timeout, maybe like 1 millisecond. It would be good to benchmark this a bit by waiting for a process and not launching a new process to see how much CPU lldb-server takes up with a shorter timeout. Adding a command line option might be nice if someone could specify "--wait-poll-msec 0" to constantly poll the system as fast as possible in case they do need to catch the process as early as possible. Repository: rG LLVM Github Monorepo CHANGES SINCE LAST ACTION https://reviews.llvm.org/D93895/new/ https://reviews.llvm.org/D93895 ___ lldb-commits mailing list lldb-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits
[Lldb-commits] [PATCH] D93874: [process] fix exec support on Linux
wallace added inline comments. Comment at: lldb/test/API/functionalities/exec/TestExec.py:96 # Run and we should stop due to exec process.Continue() clayborg wrote: > Do we need this continue if we did the step above? How does this test still > work if the step was supposed to step over the exec? It seems to me that the StepInstruction above happens before the exec is reached, and after this process.Continue() the exec will be reached Repository: rG LLVM Github Monorepo CHANGES SINCE LAST ACTION https://reviews.llvm.org/D93874/new/ https://reviews.llvm.org/D93874 ___ lldb-commits mailing list lldb-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits
[Lldb-commits] [PATCH] D94033: [lldb-vscode] improve modules request
clayborg accepted this revision. clayborg added a comment. This revision is now accepted and ready to land. LGTM Repository: rG LLVM Github Monorepo CHANGES SINCE LAST ACTION https://reviews.llvm.org/D94033/new/ https://reviews.llvm.org/D94033 ___ lldb-commits mailing list lldb-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits
[Lldb-commits] [PATCH] D93895: Implement vAttachWait in lldb-server
augusto2112 added inline comments. Comment at: lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp:344 + // TODO: Make the polling interval configurable. + std::chrono::milliseconds polling_interval = std::chrono::seconds(1); + clayborg wrote: > We probably want this to be quite a bit shorter than 1 second. Why? Because > if you don't find the process right away, 1 second is a long time for the > system to wait and your program can execute quite a few instructions in 1 > second. By the time you attach, you might have missed your breakpoint at > main. Nothing will guarantee hitting a breakpoint, but I would suggest making > a smaller timeout, maybe like 1 millisecond. It would be good to benchmark > this a bit by waiting for a process and not launching a new process to see > how much CPU lldb-server takes up with a shorter timeout. > > Adding a command line option might be nice is someone could specify > "--wait-poll-msec 0" to constantly poll the system as fast as possible in > case they do need to catch the process as early as possible. > Adding a command line option might be nice is someone could specify > "--wait-poll-msec 0" to constantly poll the system as fast as possible in > case they do need to catch the process as early as possible. It looks like there already is an option like what you're describing on debugserver: "waitfor-interval". As well as another related flag: "waitfor-duration". I'll try implementing them in this patch as well. Repository: rG LLVM Github Monorepo CHANGES SINCE LAST ACTION https://reviews.llvm.org/D93895/new/ https://reviews.llvm.org/D93895 ___ lldb-commits mailing list lldb-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits