From 76db8a9522235985237184eb6e593a3b905177b1 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Wed, 28 Jan 2026 22:16:33 +0100 Subject: [PATCH 1/8] always pass document scope by reference --- src/DocumentStore.zig | 8 ++++---- src/analysis.zig | 14 +++++++------- src/features/completions.zig | 2 +- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig index 8cf7dd13d..253c19641 100644 --- a/src/DocumentStore.zig +++ b/src/DocumentStore.zig @@ -332,8 +332,8 @@ pub const Handle = struct { return self.impl.import_uris.?; } - pub fn getDocumentScope(self: *Handle) error{OutOfMemory}!DocumentScope { - if (self.getStatus().has_document_scope) return self.impl.document_scope; + pub fn getDocumentScope(self: *Handle) error{OutOfMemory}!*const DocumentScope { + if (self.getStatus().has_document_scope) return &self.impl.document_scope; return try self.getLazy(DocumentScope, "document_scope", struct { fn create(handle: *Handle, allocator: std.mem.Allocator) error{OutOfMemory}!DocumentScope { var document_scope: DocumentScope = try .init(allocator, &handle.tree); @@ -545,7 +545,7 @@ pub const Handle = struct { comptime T: type, comptime name: []const u8, comptime Context: type, - ) error{OutOfMemory}!T { + ) error{OutOfMemory}!*const T { @branchHint(.cold); const tracy_zone = tracy.traceNamed(@src(), "getLazy(" ++ name ++ ")"); defer tracy_zone.end(); @@ -576,7 +576,7 @@ pub const Handle = struct { const old_has_data = self.impl.status.bitSet(@bitOffsetOf(Status, has_data_field_name), .release); std.debug.assert(old_has_data == 0); // race condition } - return @field(self.impl, name); + return &@field(self.impl, name); } fn getStatus(self: *const Handle) Status { diff --git a/src/analysis.zig b/src/analysis.zig index fbc9b4d1e..e660dc904 100644 --- a/src/analysis.zig +++ b/src/analysis.zig @@ -5806,7 +5806,7 @@ pub fn collectAllSymbolsAtSourceIndex( std.debug.assert(source_index <= handle.tree.source.len); const document_scope = try handle.getDocumentScope(); - var scope_iterator = iterateEnclosingScopes(&document_scope, source_index); + var scope_iterator = iterateEnclosingScopes(document_scope, source_index); while (scope_iterator.next().unwrap()) |scope_index| { const scope_decls = document_scope.getScopeDeclarationsConst(scope_index); for (scope_decls) |decl_index| { @@ -5862,7 +5862,7 @@ fn iterateEnclosingScopes(document_scope: *const DocumentScope, source_index: us pub fn iterateLabels(handle: *DocumentStore.Handle, source_index: usize, comptime callback: anytype, context: anytype) error{OutOfMemory}!void { const document_scope = try handle.getDocumentScope(); - var scope_iterator = iterateEnclosingScopes(&document_scope, source_index); + var scope_iterator = iterateEnclosingScopes(document_scope, source_index); while (scope_iterator.next().unwrap()) |scope_index| { for (document_scope.getScopeDeclarationsConst(scope_index)) |decl_index| { const decl = document_scope.declarations.get(@intFromEnum(decl_index)); @@ -5873,18 +5873,18 @@ pub fn iterateLabels(handle: *DocumentStore.Handle, source_index: usize, comptim } pub fn innermostScopeAtIndex( - document_scope: DocumentScope, + document_scope: *const DocumentScope, source_index: usize, ) Scope.Index { return innermostScopeAtIndexWithTag(document_scope, source_index, .initFull()).unwrap().?; } pub fn innermostScopeAtIndexWithTag( - document_scope: DocumentScope, + document_scope: *const DocumentScope, source_index: usize, tag_filter: std.EnumSet(Scope.Tag), ) Scope.OptionalIndex { - var 
scope_iterator = iterateEnclosingScopes(&document_scope, source_index); + var scope_iterator = iterateEnclosingScopes(document_scope, source_index); var scope_index: Scope.OptionalIndex = .none; while (scope_iterator.next().unwrap()) |inner_scope| { const scope_tag = document_scope.getScopeTag(inner_scope); @@ -5907,7 +5907,7 @@ pub fn innermostContainer(analyser: *Analyser, handle: *DocumentStore.Handle, so var current: DocumentScope.Scope.Index = .root; var meta_params: TokenToTypeMap = .empty; - var scope_iterator = iterateEnclosingScopes(&document_scope, source_index); + var scope_iterator = iterateEnclosingScopes(document_scope, source_index); while (scope_iterator.next().unwrap()) |scope_index| { switch (document_scope.getScopeTag(scope_index)) { .container => { @@ -5954,7 +5954,7 @@ pub fn lookupLabel( source_index: usize, ) error{OutOfMemory}!?DeclWithHandle { const document_scope = try handle.getDocumentScope(); - var scope_iterator = iterateEnclosingScopes(&document_scope, source_index); + var scope_iterator = iterateEnclosingScopes(document_scope, source_index); while (scope_iterator.next().unwrap()) |scope_index| { const decl_index = document_scope.getScopeDeclaration(.{ .scope = scope_index, diff --git a/src/features/completions.zig b/src/features/completions.zig index 44e7b4c8e..484d0a3bb 100644 --- a/src/features/completions.zig +++ b/src/features/completions.zig @@ -1038,7 +1038,7 @@ fn globalSetCompletions(builder: *Builder, kind: enum { error_set, enum_set }) A for (dependencies.items) |uri| { // not every dependency is loaded which results in incomplete completion const dependency_handle = store.getHandle(uri) orelse continue; - const document_scope: DocumentScope = try dependency_handle.getDocumentScope(); + const document_scope = try dependency_handle.getDocumentScope(); const curr_set: DocumentScope.IdentifierSet = switch (kind) { .error_set => @field(document_scope, "global_error_set"), .enum_set => @field(document_scope, "global_enum_set"), From 72dc81170f86bb900e0f407e9fc6ee38b7702c35 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Wed, 28 Jan 2026 22:47:15 +0100 Subject: [PATCH 2/8] refactor Handle.AssociatedBuildFile --- src/DocumentStore.zig | 69 ++++++++++++++++++++++--------------------- 1 file changed, 35 insertions(+), 34 deletions(-) diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig index 253c19641..beb683d35 100644 --- a/src/DocumentStore.zig +++ b/src/DocumentStore.zig @@ -191,38 +191,7 @@ pub const Handle = struct { import_uris: ?[]Uri = null, document_scope: DocumentScope = undefined, - associated_build_file: union(enum) { - /// The initial state. The associated build file (build.zig) is resolved lazily. - init, - /// The associated build file (build.zig) has been requested but has not yet been resolved. - unresolved: struct { - /// The build files are ordered in decreasing priority. - potential_build_files: []const *BuildFile, - /// to avoid checking build files multiple times, a bitset stores whether or - /// not the build file should be skipped because it has previously been - /// found to be "unassociated" with the handle. - has_been_checked: std.DynamicBitSetUnmanaged, - }, - /// The Handle has no associated build file (build.zig). - none, - /// The associated build file (build.zig) has been successfully resolved. 
- resolved: GetAssociatedBuildFileResult.Resolved, - - fn deinit(self: *@This(), allocator: std.mem.Allocator) void { - switch (self.*) { - .init, .none => {}, - .unresolved => |*unresolved| { - allocator.free(unresolved.potential_build_files); - unresolved.has_been_checked.deinit(allocator); - }, - .resolved => |resolved| { - allocator.free(resolved.root_source_file); - }, - } - self.* = undefined; - } - } = .init, - + associated_build_file: AssociatedBuildFile.State = .init, associated_compilation_units: GetAssociatedCompilationUnitsResult = .unresolved, }, @@ -357,7 +326,7 @@ pub const Handle = struct { return self.impl.document_scope; } - pub const GetAssociatedBuildFileResult = union(enum) { + pub const AssociatedBuildFile = union(enum) { /// The Handle has no associated build file (build.zig). none, /// The associated build file (build.zig) has not been resolved yet. @@ -369,13 +338,45 @@ pub const Handle = struct { build_file: *BuildFile, root_source_file: []const u8, }; + + const State = union(enum) { + /// The initial state. The associated build file (build.zig) is resolved lazily. + init, + /// The associated build file (build.zig) has been requested but has not yet been resolved. + unresolved: struct { + /// The build files are ordered in decreasing priority. + potential_build_files: []const *BuildFile, + /// to avoid checking build files multiple times, a bitset stores whether or + /// not the build file should be skipped because it has previously been + /// found to be "unassociated" with the handle. + has_been_checked: std.DynamicBitSetUnmanaged, + }, + /// The Handle has no associated build file (build.zig). + none, + /// The associated build file (build.zig) has been successfully resolved. + resolved: Resolved, + + fn deinit(state: *State, allocator: std.mem.Allocator) void { + switch (state.*) { + .init, .none => {}, + .unresolved => |*unresolved| { + allocator.free(unresolved.potential_build_files); + unresolved.has_been_checked.deinit(allocator); + }, + .resolved => |resolved| { + allocator.free(resolved.root_source_file); + }, + } + state.* = undefined; + } + }; }; /// Returns the associated build file (build.zig) of the handle. /// /// `DocumentStore.build_files` is guaranteed to contain this Uri. /// Uri memory managed by its build_file - pub fn getAssociatedBuildFile(self: *Handle, document_store: *DocumentStore) error{ Canceled, OutOfMemory }!GetAssociatedBuildFileResult { + pub fn getAssociatedBuildFile(self: *Handle, document_store: *DocumentStore) error{ Canceled, OutOfMemory }!AssociatedBuildFile { comptime std.debug.assert(supports_build_system); try self.impl.lock.lock(document_store.io); From bd3afbc2897d91cda7ac4b39f3896237edb52e02 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Wed, 28 Jan 2026 22:36:17 +0100 Subject: [PATCH 3/8] refactor how lsp synced flag is stored for handles --- src/DocumentStore.zig | 38 ++++++++++++-------------------------- src/Server.zig | 2 +- 2 files changed, 13 insertions(+), 27 deletions(-) diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig index beb683d35..1873f21b9 100644 --- a/src/DocumentStore.zig +++ b/src/DocumentStore.zig @@ -177,6 +177,10 @@ pub const Handle = struct { tree: Ast, /// Contains one entry for every cimport in the document cimports: std.MultiArrayList(CImportHandle), + /// `true` if the document has been directly opened by the client i.e. 
with `textDocument/didOpen` + /// `false` indicates the document only exists because it is a dependency of another document + /// or has been closed with `textDocument/didClose`. + lsp_synced: bool, /// private field impl: struct { @@ -196,16 +200,12 @@ pub const Handle = struct { }, const Status = packed struct(u32) { - /// `true` if the document has been directly opened by the client i.e. with `textDocument/didOpen` - /// `false` indicates the document only exists because it is a dependency of another document - /// or has been closed with `textDocument/didClose`. - lsp_synced: bool = false, /// true if a thread has acquired the permission to compute the `DocumentScope` /// all other threads will wait until the given thread has computed the `DocumentScope` before reading it. has_document_scope_lock: bool = false, /// true if `handle.impl.document_scope` has been set has_document_scope: bool = false, - _: u29 = 0, + _: u30 = 0, }; /// Takes ownership of `text` on success. @@ -228,10 +228,9 @@ pub const Handle = struct { .uri = uri, .tree = tree, .cimports = cimports, + .lsp_synced = lsp_synced, .impl = .{ - .status = .init(@bitCast(Status{ - .lsp_synced = lsp_synced, - })), + .status = .init(@bitCast(Status{})), .store = store, }, }; @@ -584,19 +583,6 @@ pub const Handle = struct { return @bitCast(self.impl.status.load(.acquire)); } - pub fn isLspSynced(self: *const Handle) bool { - return self.getStatus().lsp_synced; - } - - /// returns the previous value - fn setLspSynced(self: *Handle, lsp_synced: bool) bool { - if (lsp_synced) { - return self.impl.status.bitSet(@offsetOf(Handle.Status, "lsp_synced"), .release) == 1; - } else { - return self.impl.status.bitReset(@offsetOf(Handle.Status, "lsp_synced"), .release) == 1; - } - } - fn parseTree(allocator: std.mem.Allocator, new_text: [:0]const u8, mode: Ast.Mode) error{OutOfMemory}!Ast { const tracy_zone_inner = tracy.traceNamed(@src(), "Ast.parse"); defer tracy_zone_inner.end(); @@ -766,7 +752,7 @@ pub fn openLspSyncedDocument(self: *DocumentStore, uri: Uri, text: []const u8) e defer tracy_zone.end(); if (self.handles.get(uri)) |handle| { - if (handle.isLspSynced()) { + if (handle.lsp_synced) { log.warn("Document already open: {s}", .{uri.raw}); } } @@ -785,7 +771,7 @@ pub fn closeLspSyncedDocument(self: *DocumentStore, uri: Uri) void { log.warn("Document not found: {s}", .{uri.raw}); return; }; - if (!kv.value.isLspSynced()) { + if (!kv.value.lsp_synced) { log.warn("Document already closed: {s}", .{uri.raw}); } @@ -802,7 +788,7 @@ pub fn refreshLspSyncedDocument(self: *DocumentStore, uri: Uri, new_text: [:0]co defer tracy_zone.end(); if (self.handles.get(uri)) |old_handle| { - if (!old_handle.isLspSynced()) { + if (!old_handle.lsp_synced) { log.warn("Document modified without being opened: {s}", .{uri.raw}); } } else { @@ -821,7 +807,7 @@ pub fn refreshDocumentFromFileSystem(self: *DocumentStore, uri: Uri, should_dele if (should_delete) { const index = self.handles.getIndex(uri) orelse return false; const handle = self.handles.values()[index]; - if (handle.isLspSynced()) return false; + if (handle.lsp_synced) return false; self.handles.swapRemoveAt(index); const handle_uri = handle.uri; @@ -830,7 +816,7 @@ pub fn refreshDocumentFromFileSystem(self: *DocumentStore, uri: Uri, should_dele handle_uri.deinit(self.allocator); } else { if (self.handles.get(uri)) |handle| { - if (handle.isLspSynced()) return false; + if (handle.lsp_synced) return false; } else return false; const file_contents = try self.readFile(uri) orelse return false; _ = try 
self.createAndStoreDocument(uri, file_contents, false); diff --git a/src/Server.zig b/src/Server.zig index 6f9f9adf3..7b64135cd 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -1030,7 +1030,7 @@ pub fn resolveConfiguration(server: *Server) error{ Canceled, OutOfMemory }!void server.client_capabilities.supports_publish_diagnostics) { for (server.document_store.handles.values()) |handle| { - if (!handle.isLspSynced()) continue; + if (!handle.lsp_synced) continue; server.generateDiagnostics(handle); } } From b4c2a3fd87e95d53e2c1e8c4815c55b50fd20f3a Mon Sep 17 00:00:00 2001 From: Techatrix Date: Thu, 29 Jan 2026 01:38:10 +0100 Subject: [PATCH 4/8] refactor lazy resources of document handles --- src/DocumentStore.zig | 226 ++++++++++++++++++------------------------ src/analysis.zig | 2 +- 2 files changed, 97 insertions(+), 131 deletions(-) diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig index 1873f21b9..99c1115f8 100644 --- a/src/DocumentStore.zig +++ b/src/DocumentStore.zig @@ -154,7 +154,7 @@ pub const BuildFile = struct { const handle = try store.getOrLoadHandle(source_uri) orelse return .unknown; - const import_uris = try handle.getImportUris(); + const import_uris = (try handle.import_uris.get(handle)).*; try found_uris.ensureUnusedCapacity(arena, import_uris.len); for (import_uris) |import_uri| found_uris.putAssumeCapacity(try import_uri.dupe(arena), {}); } @@ -181,33 +181,17 @@ pub const Handle = struct { /// `false` indicates the document only exists because it is a dependency of another document /// or has been closed with `textDocument/didClose`. lsp_synced: bool, + document_scope: Lazy(DocumentScope, DocumentStoreContext) = .unset, + import_uris: Lazy([]const Uri, ImportUrisContext) = .unset, /// private field impl: struct { - /// @bitCast from/to `Status` - status: std.atomic.Value(u32), store: *DocumentStore, - lock: std.Io.Mutex = .init, - /// See `getLazy` - lazy_condition: std.Io.Condition = .init, - - import_uris: ?[]Uri = null, - document_scope: DocumentScope = undefined, - associated_build_file: AssociatedBuildFile.State = .init, associated_compilation_units: GetAssociatedCompilationUnitsResult = .unresolved, }, - const Status = packed struct(u32) { - /// true if a thread has acquired the permission to compute the `DocumentScope` - /// all other threads will wait until the given thread has computed the `DocumentScope` before reading it. - has_document_scope_lock: bool = false, - /// true if `handle.impl.document_scope` has been set - has_document_scope: bool = false, - _: u30 = 0, - }; - /// Takes ownership of `text` on success. 
pub fn init( store: *DocumentStore, @@ -230,7 +214,6 @@ pub const Handle = struct { .cimports = cimports, .lsp_synced = lsp_synced, .impl = .{ - .status = .init(@bitCast(Status{})), .store = store, }, }; @@ -241,18 +224,13 @@ pub const Handle = struct { const tracy_zone = tracy.trace(@src()); defer tracy_zone.end(); - const status = self.getStatus(); - const allocator = self.impl.store.allocator; - if (status.has_document_scope) self.impl.document_scope.deinit(allocator); allocator.free(self.tree.source); self.tree.deinit(allocator); - if (self.impl.import_uris) |import_uris| { - for (import_uris) |uri| uri.deinit(allocator); - allocator.free(import_uris); - } + self.document_scope.deinit(allocator); + self.import_uris.deinit(allocator); for (self.cimports.items(.source)) |source| allocator.free(source); self.cimports.deinit(allocator); @@ -263,66 +241,8 @@ pub const Handle = struct { self.* = undefined; } - pub fn getImportUris(self: *Handle) error{OutOfMemory}![]const Uri { - const store = self.impl.store; - const allocator = store.allocator; - const io = store.io; - - self.impl.lock.lockUncancelable(io); - defer self.impl.lock.unlock(io); - - if (self.impl.import_uris) |import_uris| return import_uris; - - var imports = try analysis.collectImports(allocator, &self.tree); - defer imports.deinit(allocator); - - const base_path = self.uri.toFsPath(allocator) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.UnsupportedScheme => { - self.impl.import_uris = &.{}; - return self.impl.import_uris.?; - }, - }; - defer allocator.free(base_path); - - var uris: std.ArrayList(Uri) = try .initCapacity(allocator, imports.items.len); - errdefer { - for (uris.items) |uri| uri.deinit(allocator); - uris.deinit(allocator); - } - - for (imports.items) |import_str| { - if (!std.mem.endsWith(u8, import_str, ".zig")) continue; - uris.appendAssumeCapacity(try resolveFileImportString(allocator, base_path, import_str) orelse continue); - } - - self.impl.import_uris = try uris.toOwnedSlice(allocator); - return self.impl.import_uris.?; - } - pub fn getDocumentScope(self: *Handle) error{OutOfMemory}!*const DocumentScope { - if (self.getStatus().has_document_scope) return &self.impl.document_scope; - return try self.getLazy(DocumentScope, "document_scope", struct { - fn create(handle: *Handle, allocator: std.mem.Allocator) error{OutOfMemory}!DocumentScope { - var document_scope: DocumentScope = try .init(allocator, &handle.tree); - errdefer document_scope.deinit(allocator); - - // remove unused capacity - document_scope.extra.shrinkAndFree(allocator, document_scope.extra.items.len); - try document_scope.declarations.setCapacity(allocator, document_scope.declarations.len); - try document_scope.scopes.setCapacity(allocator, document_scope.scopes.len); - - return document_scope; - } - }); - } - - /// Asserts that `getDocumentScope` has been previously called on `handle`. 
- pub fn getDocumentScopeCached(self: *Handle) DocumentScope { - if (builtin.mode == .Debug) { - std.debug.assert(self.getStatus().has_document_scope); - } - return self.impl.document_scope; + return try self.document_scope.get(self); } pub const AssociatedBuildFile = union(enum) { @@ -540,49 +460,6 @@ pub const Handle = struct { return self.impl.associated_compilation_units; } - fn getLazy( - self: *Handle, - comptime T: type, - comptime name: []const u8, - comptime Context: type, - ) error{OutOfMemory}!*const T { - @branchHint(.cold); - const tracy_zone = tracy.traceNamed(@src(), "getLazy(" ++ name ++ ")"); - defer tracy_zone.end(); - - const has_data_field_name = "has_" ++ name; - const has_lock_field_name = "has_" ++ name ++ "_lock"; - - const io = self.impl.store.io; - - self.impl.lock.lockUncancelable(io); - defer self.impl.lock.unlock(io); - - while (true) { - const status = self.getStatus(); - if (@field(status, has_data_field_name)) break; - if (@field(status, has_lock_field_name) or - self.impl.status.bitSet(@bitOffsetOf(Status, has_lock_field_name), .release) != 0) - { - // another thread is currently computing the data - self.impl.lazy_condition.waitUncancelable(io, &self.impl.lock); - continue; - } - defer self.impl.lazy_condition.broadcast(io); - - @field(self.impl, name) = try Context.create(self, self.impl.store.allocator); - errdefer comptime unreachable; - - const old_has_data = self.impl.status.bitSet(@bitOffsetOf(Status, has_data_field_name), .release); - std.debug.assert(old_has_data == 0); // race condition - } - return &@field(self.impl, name); - } - - fn getStatus(self: *const Handle) Status { - return @bitCast(self.impl.status.load(.acquire)); - } - fn parseTree(allocator: std.mem.Allocator, new_text: [:0]const u8, mode: Ast.Mode) error{OutOfMemory}!Ast { const tracy_zone_inner = tracy.traceNamed(@src(), "Ast.parse"); defer tracy_zone_inner.end(); @@ -601,6 +478,95 @@ pub const Handle = struct { tree.tokens = tokens.slice(); return tree; } + + fn Lazy(comptime T: type, comptime Context: type) type { + return struct { + mutex: std.Io.Mutex, + value: ?T, + + const LazyResource = @This(); + + pub const unset: LazyResource = .{ + .mutex = .init, + .value = null, + }; + + pub fn deinit(lazy: *LazyResource, allocator: std.mem.Allocator) void { + const value: *T = if (lazy.value) |*value| value else return; + Context.deinit(value, allocator); + lazy.value = undefined; + } + + pub fn get(lazy: *LazyResource, handle: *Handle) error{OutOfMemory}!*const T { + const tracy_zone = tracy.traceNamed(@src(), "Lazy(" ++ @typeName(T) ++ ").get"); + defer tracy_zone.end(); + + const store = handle.impl.store; + const io = store.io; + + lazy.mutex.lockUncancelable(io); + defer lazy.mutex.unlock(io); + if (lazy.value == null) { + lazy.value = try Context.create(handle, store.allocator); + } + return &lazy.value.?; + } + + pub fn getCached(lazy: *LazyResource) *const T { + if (@import("builtin").mode == .Debug) { + std.debug.assert(lazy.mutex.state.load(.acquire) == .unlocked); + } + return &lazy.value.?; + } + }; + } + + const DocumentStoreContext = struct { + fn create(handle: *Handle, allocator: std.mem.Allocator) error{OutOfMemory}!DocumentScope { + var document_scope: DocumentScope = try .init(allocator, &handle.tree); + errdefer document_scope.deinit(allocator); + + // remove unused capacity + document_scope.extra.shrinkAndFree(allocator, document_scope.extra.items.len); + try document_scope.declarations.setCapacity(allocator, document_scope.declarations.len); + try 
document_scope.scopes.setCapacity(allocator, document_scope.scopes.len); + + return document_scope; + } + fn deinit(document_scope: *DocumentScope, allocator: std.mem.Allocator) void { + document_scope.deinit(allocator); + } + }; + + const ImportUrisContext = struct { + fn create(handle: *Handle, allocator: std.mem.Allocator) error{OutOfMemory}![]const Uri { + var imports = try analysis.collectImports(allocator, &handle.tree); + defer imports.deinit(allocator); + + const base_path = handle.uri.toFsPath(allocator) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.UnsupportedScheme => return &.{}, + }; + defer allocator.free(base_path); + + var uris: std.ArrayList(Uri) = try .initCapacity(allocator, imports.items.len); + errdefer { + for (uris.items) |uri| uri.deinit(allocator); + uris.deinit(allocator); + } + + for (imports.items) |import_str| { + if (!std.mem.endsWith(u8, import_str, ".zig")) continue; + uris.appendAssumeCapacity(try resolveFileImportString(allocator, base_path, import_str) orelse continue); + } + + return try uris.toOwnedSlice(allocator); + } + fn deinit(import_uris: *[]const Uri, allocator: std.mem.Allocator) void { + for (import_uris.*) |uri| uri.deinit(allocator); + allocator.free(import_uris.*); + } + }; }; pub const ErrorMessage = struct { @@ -1479,7 +1445,7 @@ pub fn collectDependencies( const tracy_zone = tracy.trace(@src()); defer tracy_zone.end(); - const import_uris = try handle.getImportUris(); + const import_uris = (try handle.import_uris.get(handle)).*; try dependencies.ensureUnusedCapacity(allocator, import_uris.len + handle.cimports.len); for (import_uris) |uri| { diff --git a/src/analysis.zig b/src/analysis.zig index e660dc904..af7bd88e2 100644 --- a/src/analysis.zig +++ b/src/analysis.zig @@ -4515,7 +4515,7 @@ pub const ScopeWithHandle = struct { pub fn toNode(scope_handle: ScopeWithHandle) Ast.Node.Index { if (scope_handle.scope == Scope.Index.root) return .root; - var doc_scope = scope_handle.handle.getDocumentScopeCached(); + var doc_scope = scope_handle.handle.document_scope.getCached(); return doc_scope.getScopeAstNode(scope_handle.scope).?; } From b7f44e582aaa6953d0d015513c31ef8b4b52c1e9 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Thu, 29 Jan 2026 01:24:36 +0100 Subject: [PATCH 5/8] synchronize threads that load the same file Previously threads would redundantly load the same document when calling `getOrLoadHandle` on the same file. Instead, this change will make sure that only one thread does the work while the rest waits. 
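
A rough sketch of the pattern, for context only: it uses std.Thread primitives instead of the std.Io event/mutex interface that this change actually builds on, and the cache, the string key and every other name below are illustrative stand-ins rather than the real DocumentStore types.

    const std = @import("std");

    /// A placeholder that exactly one loader thread fills in; every other thread waits on it.
    const Future = struct {
        event: std.Thread.ResetEvent = .{},
        contents: ?[]u8 = null,

        fn wait(future: *Future) ?[]u8 {
            future.event.wait(); // blocks until the loading thread calls `set`
            return future.contents;
        }
    };

    const Cache = struct {
        mutex: std.Thread.Mutex = .{},
        map: std.StringHashMapUnmanaged(*Future) = .{},

        fn getOrLoad(cache: *Cache, gpa: std.mem.Allocator, path: []const u8) !?[]u8 {
            const future, const is_loader = blk: {
                cache.mutex.lock();
                defer cache.mutex.unlock();
                const gop = try cache.map.getOrPut(gpa, path);
                if (gop.found_existing) break :blk .{ gop.value_ptr.*, false };
                const new_future = try gpa.create(Future);
                new_future.* = .{};
                gop.value_ptr.* = new_future;
                break :blk .{ new_future, true };
            };
            // Threads that find an existing entry simply wait for the result.
            if (!is_loader) return future.wait();
            // Only the thread that inserted the placeholder does the expensive work.
            future.contents = gpa.dupe(u8, path) catch null; // stand-in for reading and parsing the file
            future.event.set(); // wake up all waiters
            return future.contents;
        }
    };

The actual change stores a `Handle.Future` (an `std.Io.Event` plus the handle and an optional read error) directly in `DocumentStore.handles`, so waiters also observe load failures instead of a missing value.
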
Co-Authored-By: SuperAuguste <19855629+SuperAuguste@users.noreply.github.com> --- src/DocumentStore.zig | 417 ++++++++++++++++++++++++------------ src/Server.zig | 3 +- src/Uri.zig | 7 + src/features/references.zig | 3 +- tests/analysis_check.zig | 2 +- 5 files changed, 291 insertions(+), 141 deletions(-) diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig index 99c1115f8..413f25989 100644 --- a/src/DocumentStore.zig +++ b/src/DocumentStore.zig @@ -23,7 +23,7 @@ allocator: std.mem.Allocator, config: Config, mutex: std.Io.Mutex = .init, wait_group: if (supports_build_system) std.Io.Group else void = if (supports_build_system) .init else {}, -handles: Uri.ArrayHashMap(*Handle) = .empty, +handles: Uri.ArrayHashMap(*Handle.Future) = .empty, build_files: if (supports_build_system) Uri.ArrayHashMap(*BuildFile) else void = if (supports_build_system) .empty else {}, cimports: if (supports_build_system) std.AutoArrayHashMapUnmanaged(Hash, translate_c.Result) else void = if (supports_build_system) .empty else {}, diagnostics_collection: *DiagnosticsCollection, @@ -188,59 +188,12 @@ pub const Handle = struct { impl: struct { store: *DocumentStore, lock: std.Io.Mutex = .init, + has_tree_and_source: bool, + associated_build_file: AssociatedBuildFile.State = .init, associated_compilation_units: GetAssociatedCompilationUnitsResult = .unresolved, }, - /// Takes ownership of `text` on success. - pub fn init( - store: *DocumentStore, - uri: Uri, - text: [:0]const u8, - lsp_synced: bool, - ) error{OutOfMemory}!Handle { - const allocator = store.allocator; - const mode: Ast.Mode = if (std.mem.eql(u8, std.fs.path.extension(uri.raw), ".zon")) .zon else .zig; - - var tree = try parseTree(allocator, text, mode); - errdefer tree.deinit(allocator); - - var cimports = try collectCIncludes(allocator, &tree); - errdefer cimports.deinit(allocator); - - return .{ - .uri = uri, - .tree = tree, - .cimports = cimports, - .lsp_synced = lsp_synced, - .impl = .{ - .store = store, - }, - }; - } - - /// Caller must free `Handle.uri` if needed. - fn deinit(self: *Handle) void { - const tracy_zone = tracy.trace(@src()); - defer tracy_zone.end(); - - const allocator = self.impl.store.allocator; - - allocator.free(self.tree.source); - self.tree.deinit(allocator); - - self.document_scope.deinit(allocator); - self.import_uris.deinit(allocator); - - for (self.cimports.items(.source)) |source| allocator.free(source); - self.cimports.deinit(allocator); - - self.impl.associated_build_file.deinit(allocator); - self.impl.associated_compilation_units.deinit(allocator); - - self.* = undefined; - } - pub fn getDocumentScope(self: *Handle) error{OutOfMemory}!*const DocumentScope { return try self.document_scope.get(self); } @@ -460,6 +413,42 @@ pub const Handle = struct { return self.impl.associated_compilation_units; } + fn refresh( + handle: *Handle, + /// Old state will be moved into this handle to be deallocated after returning. + old_handle: *Handle, + /// Takes ownership. 
+ text: [:0]const u8, + allocator: std.mem.Allocator, + ) error{OutOfMemory}!void { + const tracy_zone = tracy.traceNamed(@src(), "Handle.refresh"); + defer tracy_zone.end(); + + const mode: Ast.Mode = if (std.mem.eql(u8, std.fs.path.extension(handle.uri.raw), ".zon")) .zon else .zig; + var new_tree = try parseTree(allocator, text, mode); + errdefer new_tree.deinit(allocator); + + var new_cimports = try collectCIncludes(allocator, &new_tree); + errdefer new_cimports.deinit(allocator); + + errdefer comptime unreachable; + + if (handle.impl.has_tree_and_source) { + old_handle.tree = handle.tree; + old_handle.impl.has_tree_and_source = true; + } + old_handle.cimports = handle.cimports; + + handle.tree = new_tree; + handle.cimports = new_cimports; + handle.impl.has_tree_and_source = true; + + old_handle.document_scope = handle.document_scope; + handle.document_scope = .unset; + old_handle.import_uris = handle.import_uris; + handle.import_uris = .unset; + } + fn parseTree(allocator: std.mem.Allocator, new_text: [:0]const u8, mode: Ast.Mode) error{OutOfMemory}!Ast { const tracy_zone_inner = tracy.traceNamed(@src(), "Ast.parse"); defer tracy_zone_inner.end(); @@ -479,6 +468,51 @@ pub const Handle = struct { return tree; } + /// A handle that can only be deallocated. Keep in sync with `deinit`. + const dead: Handle = .{ + .uri = undefined, + .tree = undefined, + .cimports = .empty, + .lsp_synced = undefined, + .impl = .{ + .store = undefined, + .has_tree_and_source = false, + }, + }; + + /// Caller must free `Handle.uri` if needed. + /// Keep in sync with `dead`. + fn deinit(self: *Handle, allocator: std.mem.Allocator) void { + const tracy_zone = tracy.trace(@src()); + defer tracy_zone.end(); + + if (self.impl.has_tree_and_source) { + allocator.free(self.tree.source); + self.tree.deinit(allocator); + } + self.document_scope.deinit(allocator); + self.import_uris.deinit(allocator); + + for (self.cimports.items(.source)) |source| allocator.free(source); + self.cimports.deinit(allocator); + + self.impl.associated_build_file.deinit(allocator); + self.impl.associated_compilation_units.deinit(allocator); + + self.* = undefined; + } + + const Future = struct { + event: std.Io.Event, + handle: Handle, + err: ?ReadFileError, + + pub fn await(f: *Future, io: std.Io) ReadFileError!*Handle { + try f.event.wait(io); + return f.err orelse return &f.handle; + } + }; + fn Lazy(comptime T: type, comptime Context: type) type { return struct { mutex: std.Io.Mutex, @@ -569,6 +603,25 @@ pub const Handle = struct { }; }; +pub const HandleIterator = struct { + store: *DocumentStore, + i: usize = 0, + + pub fn next(it: *HandleIterator) ?*Handle { + it.store.mutex.lockUncancelable(it.store.io); + defer it.store.mutex.unlock(it.store.io); + while (true) { + defer it.i += 1; + switch (std.math.order(it.i, it.store.handles.count())) { + .lt => {}, + .eq => return null, + .gt => unreachable, // handle count decreased + } + return it.store.handles.values()[it.i].await(it.store.io) catch continue; + } + } +}; + pub const ErrorMessage = struct { loc: offsets.Loc, code: []const u8, @@ -580,10 +633,10 @@ pub fn deinit(self: *DocumentStore) void { self.wait_group.cancel(self.io); } - for (self.handles.keys(), self.handles.values()) |uri, handle| { - handle.deinit(); - self.allocator.destroy(handle); + for (self.handles.keys(), self.handles.values()) |uri, future| { + if (future.await(self.io)) |handle| handle.deinit(self.allocator) else |_| {} uri.deinit(self.allocator); + self.allocator.destroy(future); } 
self.handles.deinit(self.allocator); @@ -607,26 +660,27 @@ pub fn deinit(self: *DocumentStore) void { /// **Thread safe** takes a shared lock /// This function does not protect against data races from modifying the Handle pub fn getHandle(self: *DocumentStore, uri: Uri) ?*Handle { - self.mutex.lockUncancelable(self.io); - defer self.mutex.unlock(self.io); - return self.handles.get(uri); + const future = future: { + self.mutex.lockUncancelable(self.io); + defer self.mutex.unlock(self.io); + break :future self.handles.get(uri) orelse return null; + }; + return future.await(self.io) catch return null; } -fn readFile(self: *DocumentStore, uri: Uri) error{ Canceled, OutOfMemory }!?[:0]u8 { +const ReadFileError = std.mem.Allocator.Error || std.Io.Cancelable || std.Io.File.OpenError || std.Io.File.Reader.Error || error{StreamTooLong}; + +/// Must satisfy `uri.isFileScheme()`. +fn readFile(self: *DocumentStore, uri: Uri) ReadFileError![:0]u8 { const tracy_zone = tracy.trace(@src()); defer tracy_zone.end(); const file_path = uri.toFsPath(self.allocator) catch |err| switch (err) { - error.UnsupportedScheme => return null, // https://github.com/microsoft/language-server-protocol/issues/1264 + error.UnsupportedScheme => unreachable, error.OutOfMemory => return error.OutOfMemory, }; defer self.allocator.free(file_path); - if (!std.fs.path.isAbsolute(file_path)) { - log.err("file path is not absolute '{s}'", .{file_path}); - return null; - } - const dir, const sub_path = blk: { if (builtin.target.cpu.arch.isWasm() and !builtin.link_libc) { for (self.config.wasi_preopens.map.keys()[3..], 3..) |name, i| { @@ -642,20 +696,14 @@ fn readFile(self: *DocumentStore, uri: Uri) error{ Canceled, OutOfMemory }!?[:0] break :blk .{ std.Io.Dir.cwd(), file_path }; }; - return dir.readFileAllocOptions( + return try dir.readFileAllocOptions( self.io, sub_path, self.allocator, .limited(std.zig.max_src_size), .of(u8), 0, - ) catch |err| switch (err) { - error.Canceled, error.OutOfMemory => |e| return e, - else => { - log.err("failed to read document '{s}': {}", .{ file_path, err }); - return null; - }, - }; + ); } /// Returns a handle to the given document @@ -666,9 +714,19 @@ pub fn getOrLoadHandle(self: *DocumentStore, uri: Uri) error{ Canceled, OutOfMem const tracy_zone = tracy.trace(@src()); defer tracy_zone.end(); - if (self.getHandle(uri)) |handle| return handle; - const file_contents = try self.readFile(uri) orelse return null; - return try self.createAndStoreDocument(uri, file_contents, false); + if (!uri.isFileScheme()) return self.getHandle(uri); + return self.createAndStoreDocument( + uri, + .uri, + .{ + .lsp_synced = false, + .override = false, + .load_build_file_behaviour = .never, + }, + ) catch |err| switch (err) { + error.Canceled, error.OutOfMemory => |e| return e, + else => return null, + }; } /// **Thread safe** takes a shared lock @@ -717,14 +775,25 @@ pub fn openLspSyncedDocument(self: *DocumentStore, uri: Uri, text: []const u8) e const tracy_zone = tracy.trace(@src()); defer tracy_zone.end(); - if (self.handles.get(uri)) |handle| { + if (self.getHandle(uri)) |handle| { if (handle.lsp_synced) { log.warn("Document already open: {s}", .{uri.raw}); } } const duped_text = try self.allocator.dupeZ(u8, text); - _ = try self.createAndStoreDocument(uri, duped_text, true); + _ = self.createAndStoreDocument( + uri, + .{ .text = duped_text }, + .{ + .lsp_synced = true, + .override = true, + .load_build_file_behaviour = .load_but_dont_update, + }, + ) catch |err| switch (err) { + error.Canceled, error.OutOfMemory 
=> |e| return e, + else => unreachable, + }; } /// Closes a document that has been synced over the LSP protocol (`textDocument/didClose`). @@ -733,17 +802,24 @@ pub fn closeLspSyncedDocument(self: *DocumentStore, uri: Uri) void { const tracy_zone = tracy.trace(@src()); defer tracy_zone.end(); - const kv = self.handles.fetchSwapRemove(uri) orelse { + const handle_index = self.handles.getIndex(uri) orelse { log.warn("Document not found: {s}", .{uri.raw}); return; }; - if (!kv.value.lsp_synced) { - log.warn("Document already closed: {s}", .{uri.raw}); - } + defer self.handles.swapRemoveAt(handle_index); + + const handle_uri = self.handles.keys()[handle_index]; + defer handle_uri.deinit(self.allocator); + + const handle_future = self.handles.values()[handle_index]; + defer self.allocator.destroy(handle_future); - kv.key.deinit(self.allocator); - kv.value.deinit(); - self.allocator.destroy(kv.value); + if (handle_future.await(self.io)) |handle| { + if (!handle.lsp_synced) { + log.warn("Document already closed: {s}", .{uri.raw}); + } + handle.deinit(self.allocator); + } else |_| {} } /// Updates a document that is synced over the LSP protocol (`textDocument/didChange`). @@ -753,7 +829,7 @@ pub fn refreshLspSyncedDocument(self: *DocumentStore, uri: Uri, new_text: [:0]co const tracy_zone = tracy.trace(@src()); defer tracy_zone.end(); - if (self.handles.get(uri)) |old_handle| { + if (self.getHandle(uri)) |old_handle| { if (!old_handle.lsp_synced) { log.warn("Document modified without being opened: {s}", .{uri.raw}); } @@ -761,7 +837,18 @@ pub fn refreshLspSyncedDocument(self: *DocumentStore, uri: Uri, new_text: [:0]co log.warn("Document modified without being opened: {s}", .{uri.raw}); } - _ = try self.createAndStoreDocument(uri, new_text, true); + _ = self.createAndStoreDocument( + uri, + .{ .text = new_text }, + .{ + .lsp_synced = true, + .override = true, + .load_build_file_behaviour = .only_update, + }, + ) catch |err| switch (err) { + error.Canceled, error.OutOfMemory => |e| return e, + else => unreachable, + }; } /// Refreshes a document from the file system, unless said document is synced over the LSP protocol. @@ -772,20 +859,30 @@ pub fn refreshDocumentFromFileSystem(self: *DocumentStore, uri: Uri, should_dele if (should_delete) { const index = self.handles.getIndex(uri) orelse return false; - const handle = self.handles.values()[index]; + const handle_future = self.handles.values()[index]; + const handle = handle_future.await(self.io) catch return false; if (handle.lsp_synced) return false; self.handles.swapRemoveAt(index); - const handle_uri = handle.uri; - handle.deinit(); - self.allocator.destroy(handle); - handle_uri.deinit(self.allocator); + handle.uri.deinit(self.allocator); + handle.deinit(self.allocator); + self.allocator.destroy(handle_future); } else { - if (self.handles.get(uri)) |handle| { + if (self.getHandle(uri)) |handle| { if (handle.lsp_synced) return false; } else return false; + if (!uri.isFileScheme()) return false; + _ = self.createAndStoreDocument( + uri, + .uri, + .{ + .lsp_synced = false, + .override = true, + .load_build_file_behaviour = .only_update, + }, + ) catch |err| switch (err) { + error.Canceled, error.OutOfMemory => |e| return e, + else => return false, + }; - const file_contents = try self.readFile(uri) orelse return false; - _ = try self.createAndStoreDocument(uri, file_contents, false); } return true; @@ -1326,68 +1423,112 @@ fn uriInImports( return false; } -/// takes ownership of the `text` passed in.
+const FileSource = union(enum) { + text: [:0]const u8, + /// Must satisfy `uri.isFileScheme()`. + uri, +}; + +const CreateAndStoreOptions = struct { + lsp_synced: bool, + override: bool, + load_build_file_behaviour: enum { load_but_dont_update, only_update, never }, +}; + /// **Thread safe** takes an exclusive lock fn createAndStoreDocument( - self: *DocumentStore, + store: *DocumentStore, uri: Uri, - text: [:0]const u8, - lsp_synced: bool, -) error{ Canceled, OutOfMemory }!*Handle { + /// Takes ownership. + file_source: FileSource, + options: CreateAndStoreOptions, +) ReadFileError!*Handle { const tracy_zone = tracy.trace(@src()); defer tracy_zone.end(); - var new_handle = Handle.init(self, uri, text, lsp_synced) catch |err| switch (err) { - error.OutOfMemory => { - self.allocator.free(text); - return err; - }, + std.debug.assert(!(options.lsp_synced and !options.override)); + + switch (file_source) { + .text => {}, + .uri => std.debug.assert(uri.isFileScheme()), + } + errdefer switch (file_source) { + .text => |text| store.allocator.free(text), + .uri => {}, }; - errdefer new_handle.deinit(); - if (supports_build_system and lsp_synced and isBuildFile(uri) and !isInStd(uri)) { - if (self.getBuildFile(uri)) |build_file| { - self.invalidateBuildFile(build_file.uri); - } else { - _ = try self.getOrLoadBuildFile(uri); + if (supports_build_system and options.lsp_synced and isBuildFile(uri) and !isInStd(uri)) { + switch (options.load_build_file_behaviour) { + .load_but_dont_update => { + _ = try store.getOrLoadBuildFile(uri); + }, + .only_update => { + store.invalidateBuildFile(uri); + }, + .never => {}, } } - try self.mutex.lock(self.io); - defer self.mutex.unlock(self.io); - - const gop = try self.handles.getOrPut(self.allocator, uri); - errdefer if (!gop.found_existing) std.debug.assert(self.handles.swapRemove(uri)); - - if (gop.found_existing) { - std.debug.assert(new_handle.impl.associated_build_file == .init); - std.debug.assert(new_handle.impl.associated_compilation_units == .unresolved); - if (lsp_synced) { - new_handle.impl.associated_build_file = gop.value_ptr.*.impl.associated_build_file; - gop.value_ptr.*.impl.associated_build_file = .init; + const handle_future: *Handle.Future = handle_future: { + try store.mutex.lock(store.io); + defer store.mutex.unlock(store.io); - new_handle.impl.associated_compilation_units = gop.value_ptr.*.impl.associated_compilation_units; - gop.value_ptr.*.impl.associated_compilation_units = .unresolved; + const gop = try store.handles.getOrPut(store.allocator, uri); + errdefer store.handles.swapRemoveAt(gop.index); - new_handle.uri = gop.key_ptr.*; - gop.value_ptr.*.deinit(); - gop.value_ptr.*.* = new_handle; + if (gop.found_existing) { + if (gop.value_ptr.*.await(store.io)) |old_handle| { + if (!options.override) return old_handle; + gop.value_ptr.*.event.reset(); + old_handle.lsp_synced = options.lsp_synced; + break :handle_future gop.value_ptr.*; + } else |_| { + // + } } else { - // TODO prevent concurrent `createAndStoreDocument` invocations from racing each other - new_handle.deinit(); + gop.key_ptr.* = try uri.dupe(store.allocator); + errdefer gop.key_ptr.*.deinit(store.allocator); + + gop.value_ptr.* = try store.allocator.create(Handle.Future); + errdefer store.allocator.destroy(gop.value_ptr.*); } - } else { - gop.key_ptr.* = try uri.dupe(self.allocator); - errdefer gop.key_ptr.*.deinit(self.allocator); + gop.value_ptr.*.* = .{ + .event = .unset, + .handle = .{ + .uri = gop.key_ptr.*, + .tree = undefined, + .cimports = .empty, + .lsp_synced = 
options.lsp_synced, + .impl = .{ + .store = store, + .has_tree_and_source = false, + }, + }, + .err = null, + }; + break :handle_future gop.value_ptr.*; + }; + defer handle_future.event.set(store.io); + errdefer |err| { + if (err != error.Canceled) log.err("failed to read document '{s}': {}", .{ uri.raw, err }); + handle_future.err = err; + } - gop.value_ptr.* = try self.allocator.create(Handle); - errdefer self.allocator.destroy(gop.value_ptr.*); + const text: [:0]const u8 = switch (file_source) { + .text => |text| text, + .uri => try store.readFile(uri), + }; - new_handle.uri = gop.key_ptr.*; - gop.value_ptr.*.* = new_handle; - } + var old_handle: Handle = .dead; + try Handle.refresh( + &handle_future.handle, + &old_handle, + text, + store.allocator, + ); + old_handle.deinit(store.allocator); - return gop.value_ptr.*; + return &handle_future.handle; } pub const CImportHandle = struct { diff --git a/src/Server.zig b/src/Server.zig index 7b64135cd..64706cfe4 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -1029,7 +1029,8 @@ pub fn resolveConfiguration(server: *Server) error{ Canceled, OutOfMemory }!void (new_zig_exe_path or new_zig_lib_path) and server.client_capabilities.supports_publish_diagnostics) { - for (server.document_store.handles.values()) |handle| { + var it: DocumentStore.HandleIterator = .{ .store = &server.document_store }; + while (it.next()) |handle| { if (!handle.lsp_synced) continue; server.generateDiagnostics(handle); } diff --git a/src/Uri.zig b/src/Uri.zig index cc10f6691..3970e8ad2 100644 --- a/src/Uri.zig +++ b/src/Uri.zig @@ -317,6 +317,13 @@ test "fromPath - windows like path on posix" { try std.testing.expectEqualStrings(reparsed_uri.raw, uri.raw); } +pub fn isFileScheme(uri: Uri) bool { + const scheme = for (uri.raw, 0..) |byte, i| { + if (!isSchemeChar(byte)) break uri.raw[0..i]; + } else unreachable; // The Uri is guaranteed to be valid + return std.mem.eql(u8, scheme, "file"); +} + /// Converts a Uri to a file system path.
/// Caller owns the returned memory pub fn toFsPath( diff --git a/src/features/references.zig b/src/features/references.zig index 2e92f5c45..e0a074d5d 100644 --- a/src/features/references.zig +++ b/src/features/references.zig @@ -239,7 +239,8 @@ fn gatherReferences( dependencies.deinit(allocator); } - for (analyser.store.handles.values()) |handle| { + var it: DocumentStore.HandleIterator = .{ .store = analyser.store }; + while (it.next()) |handle| { if (skip_std_references and DocumentStore.isInStd(handle.uri)) { if (!include_decl or !handle.uri.eql(curr_handle.uri)) continue; diff --git a/tests/analysis_check.zig b/tests/analysis_check.zig index aeab162db..7f5d3d73f 100644 --- a/tests/analysis_check.zig +++ b/tests/analysis_check.zig @@ -139,7 +139,7 @@ pub fn main(init: std.process.Init) Error!void { const handle_uri: zls.Uri = try .fromPath(arena, file_path); try document_store.openLspSyncedDocument(handle_uri, source); - const handle: *zls.DocumentStore.Handle = document_store.handles.get(handle_uri).?; + const handle: *zls.DocumentStore.Handle = document_store.getHandle(handle_uri).?; var error_builder: ErrorBuilder = .init(gpa); defer error_builder.deinit(); From 6d74fcd3bb0cb913df809b8a2421628643ee064d Mon Sep 17 00:00:00 2001 From: Techatrix Date: Wed, 28 Jan 2026 22:39:42 +0100 Subject: [PATCH 6/8] update tracy zones --- src/DocumentStore.zig | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig index 413f25989..26bc76893 100644 --- a/src/DocumentStore.zig +++ b/src/DocumentStore.zig @@ -450,8 +450,8 @@ pub const Handle = struct { } fn parseTree(allocator: std.mem.Allocator, new_text: [:0]const u8, mode: Ast.Mode) error{OutOfMemory}!Ast { - const tracy_zone_inner = tracy.traceNamed(@src(), "Ast.parse"); - defer tracy_zone_inner.end(); + const tracy_zone = tracy.traceNamed(@src(), "Ast.parse"); + defer tracy_zone.end(); var tree = try Ast.parse(allocator, new_text, mode); errdefer tree.deinit(allocator); @@ -1002,6 +1002,9 @@ fn notifyBuildEnd(self: *DocumentStore, status: EndStatus) void { } fn invalidateBuildFileWorker(self: *DocumentStore, build_file: *BuildFile) std.Io.Cancelable!void { + const tracy_zone = tracy.trace(@src()); + defer tracy_zone.end(); + { try build_file.impl.mutex.lock(self.io); defer build_file.impl.mutex.unlock(self.io); @@ -1279,6 +1282,9 @@ fn buildDotZigExists(io: std.Io, dir_path: []const u8) std.Io.Cancelable!bool { /// See `Handle.getAssociatedBuildFile`. /// Caller owns returned memory. fn collectPotentialBuildFiles(self: *DocumentStore, uri: Uri) error{ Canceled, OutOfMemory }![]*BuildFile { + const tracy_zone = tracy.trace(@src()); + defer tracy_zone.end(); + if (isInStd(uri)) return &.{}; var potential_build_files: std.ArrayList(*BuildFile) = .empty; From efabaf8ac6d1ebbc6e9a44a8bf89ef2483fc8b5d Mon Sep 17 00:00:00 2001 From: Techatrix Date: Thu, 29 Jan 2026 01:05:04 +0100 Subject: [PATCH 7/8] use c allocator when linking libc This is only meant for debugging with valgrind or sanitizers. 
--- src/main.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main.zig b/src/main.zig index 70f18b838..c8a55f364 100644 --- a/src/main.zig +++ b/src/main.zig @@ -515,6 +515,7 @@ var debug_allocator: std.heap.DebugAllocator(.{}) = .init; pub fn main(init: std.process.Init.Minimal) !u8 { const base_allocator, const is_debug = gpa: { if (exe_options.debug_gpa) break :gpa .{ debug_allocator.allocator(), true }; + if (zig_builtin.link_libc) break :gpa .{ std.heap.c_allocator, false }; if (zig_builtin.target.os.tag == .wasi) break :gpa .{ std.heap.wasm_allocator, false }; break :gpa switch (zig_builtin.mode) { .Debug => .{ debug_allocator.allocator(), true }, From 532701fe02ce0f5fc9a9e9f16d448dd99de5f3f2 Mon Sep 17 00:00:00 2001 From: Techatrix Date: Thu, 29 Jan 2026 01:06:32 +0100 Subject: [PATCH 8/8] fix data race in `Server.keepRunning` --- src/Server.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Server.zig b/src/Server.zig index 64706cfe4..c4dc12c9f 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -1720,7 +1720,7 @@ pub fn setTransport(server: *Server, transport: *lsp.Transport) void { server.document_store.transport = transport; } -pub fn keepRunning(server: Server) bool { +pub fn keepRunning(server: *const Server) bool { switch (server.status) { .exiting_success, .exiting_failure => return false, else => return true,