mirror of
https://github.com/odin-lang/Odin.git
synced 2026-04-18 20:40:28 +00:00
-go-to-definitions (OGTD file format)
This commit is contained in:
@@ -33,6 +33,7 @@ template <typename T> Array<T> array_make (gbAllocator const &a, isize
|
||||
template <typename T> Array<T> array_make_from_ptr (T *data, isize count, isize capacity);
|
||||
template <typename T> void array_free (Array<T> *array);
|
||||
template <typename T> void array_add (Array<T> *array, T const &t);
|
||||
template <typename T> void array_add_elems (Array<T> *array, T const *elems, isize elem_count);
|
||||
template <typename T> T array_pop (Array<T> *array);
|
||||
template <typename T> void array_clear (Array<T> *array);
|
||||
template <typename T> void array_reserve (Array<T> *array, isize capacity);
|
||||
@@ -157,6 +158,17 @@ void array_add(Array<T> *array, T const &t) {
|
||||
array->count++;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void array_add_elems(Array<T> *array, T const *elems, isize elem_count) {
|
||||
GB_ASSERT(elem_count >= 0);
|
||||
if (array->capacity < array->count+elem_count) {
|
||||
array__grow(array, array->count+elem_count);
|
||||
}
|
||||
gb_memmove(array->data + array->count, elems, elem_count * gb_size_of(T));
|
||||
array->count += elem_count;
|
||||
}
|
||||
|
||||
|
||||
template <typename T>
|
||||
gb_inline T array_pop(Array<T> *array) {
|
||||
GB_ASSERT(array->count > 0);
|
||||
|
||||
@@ -69,6 +69,19 @@ struct TargetMetrics {
|
||||
};
|
||||
|
||||
|
||||
enum QueryDataSetKind {
|
||||
QueryDataSet_Invalid,
|
||||
QueryDataSet_GlobalDefinitions,
|
||||
QueryDataSet_GoToDefinitions,
|
||||
};
|
||||
|
||||
struct QueryDataSetSettings {
|
||||
QueryDataSetKind kind;
|
||||
bool ok;
|
||||
bool compact;
|
||||
};
|
||||
|
||||
|
||||
// This stores the information for the specify architecture of this build
|
||||
struct BuildContext {
|
||||
// Constants
|
||||
@@ -105,12 +118,12 @@ struct BuildContext {
|
||||
bool ignore_unknown_attributes;
|
||||
bool no_bounds_check;
|
||||
bool no_output_files;
|
||||
bool print_query_data;
|
||||
bool print_query_data_compact;
|
||||
bool no_crt;
|
||||
bool use_lld;
|
||||
bool vet;
|
||||
|
||||
QueryDataSetSettings query_data_set_settings;
|
||||
|
||||
gbAffinity affinity;
|
||||
isize thread_count;
|
||||
|
||||
|
||||
@@ -371,6 +371,7 @@ bool find_or_generate_polymorphic_procedure(CheckerContext *c, Entity *base_enti
|
||||
add_entity_and_decl_info(&nctx, ident, entity, d);
|
||||
// NOTE(bill): Set the scope afterwards as this is not real overloading
|
||||
entity->scope = scope->parent;
|
||||
entity->file = base_entity->file;
|
||||
entity->pkg = base_entity->pkg;
|
||||
|
||||
AstFile *file = nullptr;
|
||||
|
||||
@@ -301,6 +301,7 @@ void add_polymorphic_record_entity(CheckerContext *ctx, Ast *node, Type *named_t
|
||||
|
||||
e = alloc_entity_type_name(s, token, named_type);
|
||||
e->state = EntityState_Resolved;
|
||||
e->file = ctx->file;
|
||||
e->pkg = ctx->pkg;
|
||||
add_entity_use(ctx, node, e);
|
||||
}
|
||||
|
||||
@@ -790,6 +790,11 @@ void init_checker_info(CheckerInfo *i) {
|
||||
map_init(&i->files, a);
|
||||
map_init(&i->packages, a);
|
||||
array_init(&i->variable_init_order, a);
|
||||
|
||||
i->allow_identifier_uses = build_context.query_data_set_settings.kind == QueryDataSet_GoToDefinitions;
|
||||
if (i->allow_identifier_uses) {
|
||||
array_init(&i->identifier_uses, a);
|
||||
}
|
||||
}
|
||||
|
||||
void destroy_checker_info(CheckerInfo *i) {
|
||||
@@ -804,6 +809,7 @@ void destroy_checker_info(CheckerInfo *i) {
|
||||
map_destroy(&i->files);
|
||||
map_destroy(&i->packages);
|
||||
array_free(&i->variable_init_order);
|
||||
array_free(&i->identifier_uses);
|
||||
}
|
||||
|
||||
CheckerContext make_checker_context(Checker *c) {
|
||||
@@ -1025,7 +1031,6 @@ void add_entity_definition(CheckerInfo *i, Ast *identifier, Entity *entity) {
|
||||
return;
|
||||
}
|
||||
GB_ASSERT(entity != nullptr);
|
||||
|
||||
identifier->Ident.entity = entity;
|
||||
entity->identifier = identifier;
|
||||
array_add(&i->definitions, entity);
|
||||
@@ -1069,6 +1074,10 @@ bool add_entity_with_name(Checker *c, Scope *scope, Ast *identifier, Entity *ent
|
||||
}
|
||||
}
|
||||
if (identifier != nullptr) {
|
||||
if (entity->file == nullptr) {
|
||||
GB_ASSERT(c->curr_ctx != nullptr);
|
||||
entity->file = c->curr_ctx->file;
|
||||
}
|
||||
add_entity_definition(&c->info, identifier, entity);
|
||||
}
|
||||
return true;
|
||||
@@ -1090,6 +1099,10 @@ void add_entity_use(CheckerContext *c, Ast *identifier, Entity *entity) {
|
||||
}
|
||||
identifier->Ident.entity = entity;
|
||||
|
||||
if (c->info->allow_identifier_uses) {
|
||||
array_add(&c->info->identifier_uses, identifier);
|
||||
}
|
||||
|
||||
String dmsg = entity->deprecated_message;
|
||||
if (dmsg.len > 0) {
|
||||
warning(identifier, "%.*s is deprecated: %.*s", LIT(entity->token.string), LIT(dmsg));
|
||||
@@ -1342,6 +1355,7 @@ void add_curr_ast_file(CheckerContext *ctx, AstFile *file) {
|
||||
ctx->decl = file->pkg->decl_info;
|
||||
ctx->scope = file->scope;
|
||||
ctx->pkg = file->pkg;
|
||||
ctx->checker->curr_ctx = ctx;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -456,6 +456,10 @@ struct CheckerInfo {
|
||||
Entity * entry_point;
|
||||
PtrSet<Entity *> minimum_dependency_set;
|
||||
PtrSet<isize> minimum_dependency_type_info_set;
|
||||
|
||||
|
||||
bool allow_identifier_uses;
|
||||
Array<Ast *> identifier_uses; // only used by 'odin query'
|
||||
};
|
||||
|
||||
struct CheckerContext {
|
||||
@@ -497,6 +501,7 @@ struct Checker {
|
||||
Array<ProcInfo> procs_to_check;
|
||||
Array<Entity *> procs_with_deferred_to_check;
|
||||
|
||||
CheckerContext *curr_ctx;
|
||||
gbAllocator allocator;
|
||||
CheckerContext init_ctx;
|
||||
};
|
||||
|
||||
@@ -91,6 +91,7 @@ struct Entity {
|
||||
Ast * identifier; // Can be nullptr
|
||||
DeclInfo * decl_info;
|
||||
DeclInfo * parent_proc_decl; // nullptr if in file/global scope
|
||||
AstFile * file;
|
||||
AstPackage *pkg;
|
||||
|
||||
// TODO(bill): Cleanup how `using` works for entities
|
||||
|
||||
475
src/main.cpp
475
src/main.cpp
@@ -218,7 +218,10 @@ enum BuildFlagKind {
|
||||
BuildFlag_UseLLD,
|
||||
BuildFlag_Vet,
|
||||
BuildFlag_IgnoreUnknownAttributes,
|
||||
|
||||
BuildFlag_Compact,
|
||||
BuildFlag_GlobalDefinitions,
|
||||
BuildFlag_GoToDefinitions,
|
||||
|
||||
#if defined(GB_SYSTEM_WINDOWS)
|
||||
BuildFlag_ResourceFile,
|
||||
@@ -303,7 +306,10 @@ bool parse_build_flags(Array<String> args) {
|
||||
add_flag(&build_flags, BuildFlag_UseLLD, str_lit("lld"), BuildFlagParam_None);
|
||||
add_flag(&build_flags, BuildFlag_Vet, str_lit("vet"), BuildFlagParam_None);
|
||||
add_flag(&build_flags, BuildFlag_IgnoreUnknownAttributes, str_lit("ignore-unknown-attributes"), BuildFlagParam_None);
|
||||
|
||||
add_flag(&build_flags, BuildFlag_Compact, str_lit("compact"), BuildFlagParam_None);
|
||||
add_flag(&build_flags, BuildFlag_GlobalDefinitions, str_lit("global-definitions"), BuildFlagParam_None);
|
||||
add_flag(&build_flags, BuildFlag_GoToDefinitions, str_lit("go-to-definitions"), BuildFlagParam_None);
|
||||
|
||||
#if defined(GB_SYSTEM_WINDOWS)
|
||||
add_flag(&build_flags, BuildFlag_ResourceFile, str_lit("resource"), BuildFlagParam_String);
|
||||
@@ -667,11 +673,34 @@ bool parse_build_flags(Array<String> args) {
|
||||
break;
|
||||
|
||||
case BuildFlag_Compact:
|
||||
if (!build_context.print_query_data) {
|
||||
if (!build_context.query_data_set_settings.ok) {
|
||||
gb_printf_err("Invalid use of -compact flag, only allowed with 'odin query'\n");
|
||||
bad_flags = true;
|
||||
} else {
|
||||
build_context.print_query_data_compact = true;
|
||||
build_context.query_data_set_settings.compact = true;
|
||||
}
|
||||
break;
|
||||
|
||||
case BuildFlag_GlobalDefinitions:
|
||||
if (!build_context.query_data_set_settings.ok) {
|
||||
gb_printf_err("Invalid use of -global-definitions flag, only allowed with 'odin query'\n");
|
||||
bad_flags = true;
|
||||
} else if (build_context.query_data_set_settings.kind != QueryDataSet_Invalid) {
|
||||
gb_printf_err("Invalid use of -global-definitions flag, a previous flag for 'odin query' was set\n");
|
||||
bad_flags = true;
|
||||
} else {
|
||||
build_context.query_data_set_settings.kind = QueryDataSet_GlobalDefinitions;
|
||||
}
|
||||
break;
|
||||
case BuildFlag_GoToDefinitions:
|
||||
if (!build_context.query_data_set_settings.ok) {
|
||||
gb_printf_err("Invalid use of -go-to-definitions flag, only allowed with 'odin query'\n");
|
||||
bad_flags = true;
|
||||
} else if (build_context.query_data_set_settings.kind != QueryDataSet_Invalid) {
|
||||
gb_printf_err("Invalid use of -global-definitions flag, a previous flag for 'odin query' was set\n");
|
||||
bad_flags = true;
|
||||
} else {
|
||||
build_context.query_data_set_settings.kind = QueryDataSet_GoToDefinitions;
|
||||
}
|
||||
break;
|
||||
|
||||
@@ -731,6 +760,16 @@ bool parse_build_flags(Array<String> args) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (build_context.query_data_set_settings.ok) {
|
||||
if (build_context.query_data_set_settings.kind == QueryDataSet_Invalid) {
|
||||
gb_printf_err("'odin query' requires a flag determining the kind of query data set to be returned\n");
|
||||
gb_printf_err("\t-global-definitions : outputs a JSON file of global definitions\n");
|
||||
gb_printf_err("\t-go-to-definitions : outputs a OGTD binary file of go to definitions for identifiers within an Odin project\n");
|
||||
bad_flags = true;
|
||||
}
|
||||
}
|
||||
|
||||
return !bad_flags;
|
||||
}
|
||||
|
||||
@@ -828,434 +867,6 @@ void remove_temp_files(String output_base) {
|
||||
|
||||
|
||||
|
||||
int query_data_package_compare(void const *a, void const *b) {
|
||||
AstPackage *x = *cast(AstPackage *const *)a;
|
||||
AstPackage *y = *cast(AstPackage *const *)b;
|
||||
|
||||
if (x == y) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (x != nullptr && y != nullptr) {
|
||||
return string_compare(x->name, y->name);
|
||||
} else if (x != nullptr && y == nullptr) {
|
||||
return -1;
|
||||
} else if (x == nullptr && y != nullptr) {
|
||||
return +1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int query_data_definition_compare(void const *a, void const *b) {
|
||||
Entity *x = *cast(Entity *const *)a;
|
||||
Entity *y = *cast(Entity *const *)b;
|
||||
|
||||
if (x == y) {
|
||||
return 0;
|
||||
} else if (x != nullptr && y == nullptr) {
|
||||
return -1;
|
||||
} else if (x == nullptr && y != nullptr) {
|
||||
return +1;
|
||||
}
|
||||
|
||||
if (x->pkg != y->pkg) {
|
||||
i32 res = query_data_package_compare(&x->pkg, &y->pkg);
|
||||
if (res != 0) {
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
return string_compare(x->token.string, y->token.string);
|
||||
}
|
||||
|
||||
int entity_name_compare(void const *a, void const *b) {
|
||||
Entity *x = *cast(Entity *const *)a;
|
||||
Entity *y = *cast(Entity *const *)b;
|
||||
if (x == y) {
|
||||
return 0;
|
||||
} else if (x != nullptr && y == nullptr) {
|
||||
return -1;
|
||||
} else if (x == nullptr && y != nullptr) {
|
||||
return +1;
|
||||
}
|
||||
return string_compare(x->token.string, y->token.string);
|
||||
}
|
||||
|
||||
void generate_and_print_query_data(Checker *c, Timings *timings) {
|
||||
query_value_allocator = heap_allocator();
|
||||
|
||||
|
||||
auto *root = query_value_map();
|
||||
|
||||
if (global_error_collector.errors.count > 0) {
|
||||
auto *errors = query_value_array();
|
||||
root->add("errors", errors);
|
||||
for_array(i, global_error_collector.errors) {
|
||||
String err = string_trim_whitespace(global_error_collector.errors[i]);
|
||||
errors->add(err);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
{ // Packages
|
||||
auto *packages = query_value_array();
|
||||
root->add("packages", packages);
|
||||
|
||||
auto sorted_packages = array_make<AstPackage *>(query_value_allocator, 0, c->info.packages.entries.count);
|
||||
defer (array_free(&sorted_packages));
|
||||
|
||||
for_array(i, c->info.packages.entries) {
|
||||
AstPackage *pkg = c->info.packages.entries[i].value;
|
||||
if (pkg != nullptr) {
|
||||
array_add(&sorted_packages, pkg);
|
||||
}
|
||||
}
|
||||
gb_sort_array(sorted_packages.data, sorted_packages.count, query_data_package_compare);
|
||||
packages->reserve(sorted_packages.count);
|
||||
|
||||
for_array(i, sorted_packages) {
|
||||
AstPackage *pkg = sorted_packages[i];
|
||||
String name = pkg->name;
|
||||
String fullpath = pkg->fullpath;
|
||||
|
||||
auto *files = query_value_array();
|
||||
files->reserve(pkg->files.count);
|
||||
for_array(j, pkg->files) {
|
||||
AstFile *f = pkg->files[j];
|
||||
files->add(f->fullpath);
|
||||
}
|
||||
|
||||
auto *package = query_value_map();
|
||||
package->reserve(3);
|
||||
packages->add(package);
|
||||
|
||||
package->add("name", pkg->name);
|
||||
package->add("fullpath", pkg->fullpath);
|
||||
package->add("files", files);
|
||||
}
|
||||
}
|
||||
|
||||
if (c->info.definitions.count > 0) {
|
||||
auto *definitions = query_value_array();
|
||||
root->add("definitions", definitions);
|
||||
|
||||
auto sorted_definitions = array_make<Entity *>(query_value_allocator, 0, c->info.definitions.count);
|
||||
defer (array_free(&sorted_definitions));
|
||||
|
||||
for_array(i, c->info.definitions) {
|
||||
Entity *e = c->info.definitions[i];
|
||||
String name = e->token.string;
|
||||
if (is_blank_ident(name)) {
|
||||
continue;
|
||||
}
|
||||
if ((e->scope->flags & (ScopeFlag_Pkg|ScopeFlag_File)) == 0) {
|
||||
continue;
|
||||
}
|
||||
if (e->parent_proc_decl != nullptr) {
|
||||
continue;
|
||||
}
|
||||
switch (e->kind) {
|
||||
case Entity_Builtin:
|
||||
case Entity_Nil:
|
||||
case Entity_Label:
|
||||
continue;
|
||||
}
|
||||
if (e->pkg == nullptr) {
|
||||
continue;
|
||||
}
|
||||
if (e->token.pos.line == 0) {
|
||||
continue;
|
||||
}
|
||||
if (e->kind == Entity_Procedure) {
|
||||
Type *t = base_type(e->type);
|
||||
if (t->kind != Type_Proc) {
|
||||
continue;
|
||||
}
|
||||
if (t->Proc.is_poly_specialized) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (e->kind == Entity_TypeName) {
|
||||
Type *t = base_type(e->type);
|
||||
if (t->kind == Type_Struct) {
|
||||
if (t->Struct.is_poly_specialized) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (t->kind == Type_Union) {
|
||||
if (t->Union.is_poly_specialized) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
array_add(&sorted_definitions, e);
|
||||
}
|
||||
|
||||
gb_sort_array(sorted_definitions.data, sorted_definitions.count, query_data_definition_compare);
|
||||
definitions->reserve(sorted_definitions.count);
|
||||
|
||||
for_array(i, sorted_definitions) {
|
||||
Entity *e = sorted_definitions[i];
|
||||
String name = e->token.string;
|
||||
|
||||
auto *def = query_value_map();
|
||||
def->reserve(16);
|
||||
definitions->add(def);
|
||||
|
||||
def->add("package", e->pkg->name);
|
||||
def->add("name", name);
|
||||
def->add("filepath", e->token.pos.file);
|
||||
def->add("line", e->token.pos.line);
|
||||
def->add("column", e->token.pos.column);
|
||||
def->add("file_offset", e->token.pos.offset);
|
||||
|
||||
switch (e->kind) {
|
||||
case Entity_Constant: def->add("kind", str_lit("constant")); break;
|
||||
case Entity_Variable: def->add("kind", str_lit("variable")); break;
|
||||
case Entity_TypeName: def->add("kind", str_lit("type name")); break;
|
||||
case Entity_Procedure: def->add("kind", str_lit("procedure")); break;
|
||||
case Entity_ProcGroup: def->add("kind", str_lit("procedure group")); break;
|
||||
case Entity_ImportName: def->add("kind", str_lit("import name")); break;
|
||||
case Entity_LibraryName: def->add("kind", str_lit("library name")); break;
|
||||
default: GB_PANIC("Invalid entity kind to be added");
|
||||
}
|
||||
|
||||
|
||||
if (e->type != nullptr && e->type != t_invalid) {
|
||||
Type *t = e->type;
|
||||
Type *bt = t;
|
||||
|
||||
switch (e->kind) {
|
||||
case Entity_TypeName:
|
||||
if (!e->TypeName.is_type_alias) {
|
||||
bt = base_type(t);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
{
|
||||
gbString str = type_to_string(t);
|
||||
String type_str = make_string(cast(u8 *)str, gb_string_length(str));
|
||||
def->add("type", type_str);
|
||||
}
|
||||
if (t != bt) {
|
||||
gbString str = type_to_string(bt);
|
||||
String type_str = make_string(cast(u8 *)str, gb_string_length(str));
|
||||
def->add("base_type", type_str);
|
||||
}
|
||||
{
|
||||
String type_kind = {};
|
||||
Type *bt = base_type(t);
|
||||
switch (bt->kind) {
|
||||
case Type_Pointer: type_kind = str_lit("pointer"); break;
|
||||
case Type_Opaque: type_kind = str_lit("opaque"); break;
|
||||
case Type_Array: type_kind = str_lit("array"); break;
|
||||
case Type_Slice: type_kind = str_lit("slice"); break;
|
||||
case Type_DynamicArray: type_kind = str_lit("dynamic array"); break;
|
||||
case Type_Map: type_kind = str_lit("map"); break;
|
||||
case Type_Struct: type_kind = str_lit("struct"); break;
|
||||
case Type_Union: type_kind = str_lit("union"); break;
|
||||
case Type_Enum: type_kind = str_lit("enum"); break;
|
||||
case Type_Proc: type_kind = str_lit("procedure"); break;
|
||||
case Type_BitField: type_kind = str_lit("bit field"); break;
|
||||
case Type_BitSet: type_kind = str_lit("bit set"); break;
|
||||
case Type_SimdVector: type_kind = str_lit("simd vector"); break;
|
||||
|
||||
case Type_Generic:
|
||||
case Type_Tuple:
|
||||
case Type_BitFieldValue:
|
||||
GB_PANIC("Invalid definition type");
|
||||
break;
|
||||
}
|
||||
if (type_kind.len > 0) {
|
||||
def->add("type_kind", type_kind);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (e->kind == Entity_TypeName) {
|
||||
def->add("size", type_size_of(e->type));
|
||||
def->add("align", type_align_of(e->type));
|
||||
|
||||
|
||||
if (is_type_struct(e->type)) {
|
||||
auto *data = query_value_map();
|
||||
data->reserve(6);
|
||||
|
||||
def->add("data", data);
|
||||
|
||||
Type *t = base_type(e->type);
|
||||
GB_ASSERT(t->kind == Type_Struct);
|
||||
|
||||
if (t->Struct.is_polymorphic) {
|
||||
data->add("polymorphic", t->Struct.is_polymorphic);
|
||||
}
|
||||
if (t->Struct.is_poly_specialized) {
|
||||
data->add("polymorphic_specialized", t->Struct.is_poly_specialized);
|
||||
}
|
||||
if (t->Struct.is_packed) {
|
||||
data->add("packed", t->Struct.is_packed);
|
||||
}
|
||||
if (t->Struct.is_raw_union) {
|
||||
data->add("raw_union", t->Struct.is_raw_union);
|
||||
}
|
||||
|
||||
auto *fields = query_value_array();
|
||||
data->add("fields", fields);
|
||||
fields->reserve(t->Struct.fields.count);
|
||||
fields->packed = true;
|
||||
|
||||
for_array(j, t->Struct.fields) {
|
||||
Entity *e = t->Struct.fields[j];
|
||||
String name = e->token.string;
|
||||
if (is_blank_ident(name)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
fields->add(name);
|
||||
}
|
||||
} else if (is_type_union(e->type)) {
|
||||
auto *data = query_value_map();
|
||||
data->reserve(4);
|
||||
|
||||
def->add("data", data);
|
||||
Type *t = base_type(e->type);
|
||||
GB_ASSERT(t->kind == Type_Union);
|
||||
|
||||
if (t->Union.is_polymorphic) {
|
||||
data->add("polymorphic", t->Union.is_polymorphic);
|
||||
}
|
||||
if (t->Union.is_poly_specialized) {
|
||||
data->add("polymorphic_specialized", t->Union.is_poly_specialized);
|
||||
}
|
||||
|
||||
auto *variants = query_value_array();
|
||||
variants->reserve(t->Union.variants.count);
|
||||
data->add("variants", variants);
|
||||
|
||||
for_array(j, t->Union.variants) {
|
||||
Type *vt = t->Union.variants[j];
|
||||
|
||||
gbString str = type_to_string(vt);
|
||||
String type_str = make_string(cast(u8 *)str, gb_string_length(str));
|
||||
variants->add(type_str);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (e->kind == Entity_Procedure) {
|
||||
Type *t = base_type(e->type);
|
||||
GB_ASSERT(t->kind == Type_Proc);
|
||||
|
||||
bool is_polymorphic = t->Proc.is_polymorphic;
|
||||
bool is_poly_specialized = t->Proc.is_poly_specialized;
|
||||
bool ok = is_polymorphic || is_poly_specialized;
|
||||
if (ok) {
|
||||
auto *data = query_value_map();
|
||||
data->reserve(4);
|
||||
|
||||
def->add("data", data);
|
||||
if (is_polymorphic) {
|
||||
data->add("polymorphic", is_polymorphic);
|
||||
}
|
||||
if (is_poly_specialized) {
|
||||
data->add("polymorphic_specialized", is_poly_specialized);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (e->kind == Entity_ProcGroup) {
|
||||
auto *procedures = query_value_array();
|
||||
procedures->reserve(e->ProcGroup.entities.count);
|
||||
|
||||
for_array(j, e->ProcGroup.entities) {
|
||||
Entity *p = e->ProcGroup.entities[j];
|
||||
|
||||
auto *procedure = query_value_map();
|
||||
procedure->reserve(2);
|
||||
procedure->packed = true;
|
||||
|
||||
procedures->add(procedure);
|
||||
|
||||
procedure->add("package", p->pkg->name);
|
||||
procedure->add("name", p->token.string);
|
||||
}
|
||||
def->add("procedures", procedures);
|
||||
}
|
||||
|
||||
DeclInfo *di = e->decl_info;
|
||||
if (di != nullptr) {
|
||||
if (di->is_using) {
|
||||
def->add("using", query_value_boolean(true));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (build_context.show_timings) {
|
||||
Timings *t = timings;
|
||||
timings__stop_current_section(t);
|
||||
t->total.finish = time_stamp_time_now();
|
||||
isize max_len = gb_min(36, t->total.label.len);
|
||||
for_array(i, t->sections) {
|
||||
TimeStamp ts = t->sections[i];
|
||||
max_len = gb_max(max_len, ts.label.len);
|
||||
}
|
||||
t->total_time_seconds = time_stamp_as_s(t->total, t->freq);
|
||||
|
||||
auto *tims = query_value_map();
|
||||
tims->reserve(8);
|
||||
root->add("timings", tims);
|
||||
tims->add("time_unit", str_lit("s"));
|
||||
|
||||
tims->add(t->total.label, cast(f64)t->total_time_seconds);
|
||||
|
||||
|
||||
Parser *p = c->parser;
|
||||
if (p != nullptr) {
|
||||
isize lines = p->total_line_count;
|
||||
isize tokens = p->total_token_count;
|
||||
isize files = 0;
|
||||
isize packages = p->packages.count;
|
||||
isize total_file_size = 0;
|
||||
for_array(i, p->packages) {
|
||||
files += p->packages[i]->files.count;
|
||||
for_array(j, p->packages[i]->files) {
|
||||
AstFile *file = p->packages[i]->files[j];
|
||||
total_file_size += file->tokenizer.end - file->tokenizer.start;
|
||||
}
|
||||
}
|
||||
|
||||
tims->add("total_lines", lines);
|
||||
tims->add("total_tokens", tokens);
|
||||
tims->add("total_files", files);
|
||||
tims->add("total_packages", packages);
|
||||
tims->add("total_file_size", total_file_size);
|
||||
|
||||
auto *sections = query_value_map();
|
||||
sections->reserve(t->sections.count);
|
||||
tims->add("sections", sections);
|
||||
for_array(i, t->sections) {
|
||||
TimeStamp ts = t->sections[i];
|
||||
f64 section_time = time_stamp_as_s(ts, t->freq);
|
||||
|
||||
auto *section = query_value_map();
|
||||
section->reserve(2);
|
||||
sections->add(ts.label, section);
|
||||
section->add("time", section_time);
|
||||
section->add("total_fraction", section_time/t->total_time_seconds);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
print_query_data_as_json(root, !build_context.print_query_data_compact);
|
||||
gb_printf("\n");
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
i32 exec_llvm_opt(String output_base) {
|
||||
#if defined(GB_SYSTEM_WINDOWS)
|
||||
@@ -1378,7 +989,7 @@ int main(int arg_count, char **arg_ptr) {
|
||||
return 1;
|
||||
}
|
||||
build_context.no_output_files = true;
|
||||
build_context.print_query_data = true;
|
||||
build_context.query_data_set_settings.ok = true;
|
||||
init_filename = args[2];
|
||||
} else if (command == "docs") {
|
||||
if (args.count < 3) {
|
||||
@@ -1455,7 +1066,7 @@ int main(int arg_count, char **arg_ptr) {
|
||||
|
||||
|
||||
if (build_context.no_output_files) {
|
||||
if (build_context.print_query_data) {
|
||||
if (build_context.query_data_set_settings.ok) {
|
||||
generate_and_print_query_data(&checker, &timings);
|
||||
} else {
|
||||
if (build_context.show_timings) {
|
||||
|
||||
@@ -359,3 +359,678 @@ void print_query_data_as_json(QueryValue *value, bool format = true, isize inden
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
int query_data_package_compare(void const *a, void const *b) {
|
||||
AstPackage *x = *cast(AstPackage *const *)a;
|
||||
AstPackage *y = *cast(AstPackage *const *)b;
|
||||
|
||||
if (x == y) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (x != nullptr && y != nullptr) {
|
||||
return string_compare(x->name, y->name);
|
||||
} else if (x != nullptr && y == nullptr) {
|
||||
return -1;
|
||||
} else if (x == nullptr && y != nullptr) {
|
||||
return +1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int query_data_definition_compare(void const *a, void const *b) {
|
||||
Entity *x = *cast(Entity *const *)a;
|
||||
Entity *y = *cast(Entity *const *)b;
|
||||
|
||||
if (x == y) {
|
||||
return 0;
|
||||
} else if (x != nullptr && y == nullptr) {
|
||||
return -1;
|
||||
} else if (x == nullptr && y != nullptr) {
|
||||
return +1;
|
||||
}
|
||||
|
||||
if (x->pkg != y->pkg) {
|
||||
i32 res = query_data_package_compare(&x->pkg, &y->pkg);
|
||||
if (res != 0) {
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
return string_compare(x->token.string, y->token.string);
|
||||
}
|
||||
|
||||
int entity_name_compare(void const *a, void const *b) {
|
||||
Entity *x = *cast(Entity *const *)a;
|
||||
Entity *y = *cast(Entity *const *)b;
|
||||
if (x == y) {
|
||||
return 0;
|
||||
} else if (x != nullptr && y == nullptr) {
|
||||
return -1;
|
||||
} else if (x == nullptr && y != nullptr) {
|
||||
return +1;
|
||||
}
|
||||
return string_compare(x->token.string, y->token.string);
|
||||
}
|
||||
|
||||
|
||||
void generate_and_print_query_data_global_definitions(Checker *c, Timings *timings);
|
||||
void generate_and_print_query_data_go_to_definitions(Checker *c);
|
||||
|
||||
void generate_and_print_query_data(Checker *c, Timings *timings) {
|
||||
query_value_allocator = heap_allocator();
|
||||
switch (build_context.query_data_set_settings.kind) {
|
||||
case QueryDataSet_GlobalDefinitions:
|
||||
generate_and_print_query_data_global_definitions(c, timings);
|
||||
return;
|
||||
case QueryDataSet_GoToDefinitions:
|
||||
generate_and_print_query_data_go_to_definitions(c);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void generate_and_print_query_data_global_definitions(Checker *c, Timings *timings) {
|
||||
auto *root = query_value_map();
|
||||
|
||||
if (global_error_collector.errors.count > 0) {
|
||||
auto *errors = query_value_array();
|
||||
root->add("errors", errors);
|
||||
for_array(i, global_error_collector.errors) {
|
||||
String err = string_trim_whitespace(global_error_collector.errors[i]);
|
||||
errors->add(err);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
{ // Packages
|
||||
auto *packages = query_value_array();
|
||||
root->add("packages", packages);
|
||||
|
||||
auto sorted_packages = array_make<AstPackage *>(query_value_allocator, 0, c->info.packages.entries.count);
|
||||
defer (array_free(&sorted_packages));
|
||||
|
||||
for_array(i, c->info.packages.entries) {
|
||||
AstPackage *pkg = c->info.packages.entries[i].value;
|
||||
if (pkg != nullptr) {
|
||||
array_add(&sorted_packages, pkg);
|
||||
}
|
||||
}
|
||||
gb_sort_array(sorted_packages.data, sorted_packages.count, query_data_package_compare);
|
||||
packages->reserve(sorted_packages.count);
|
||||
|
||||
for_array(i, sorted_packages) {
|
||||
AstPackage *pkg = sorted_packages[i];
|
||||
String name = pkg->name;
|
||||
String fullpath = pkg->fullpath;
|
||||
|
||||
auto *files = query_value_array();
|
||||
files->reserve(pkg->files.count);
|
||||
for_array(j, pkg->files) {
|
||||
AstFile *f = pkg->files[j];
|
||||
files->add(f->fullpath);
|
||||
}
|
||||
|
||||
auto *package = query_value_map();
|
||||
package->reserve(3);
|
||||
packages->add(package);
|
||||
|
||||
package->add("name", pkg->name);
|
||||
package->add("fullpath", pkg->fullpath);
|
||||
package->add("files", files);
|
||||
}
|
||||
}
|
||||
|
||||
if (c->info.definitions.count > 0) {
|
||||
auto *definitions = query_value_array();
|
||||
root->add("definitions", definitions);
|
||||
|
||||
auto sorted_definitions = array_make<Entity *>(query_value_allocator, 0, c->info.definitions.count);
|
||||
defer (array_free(&sorted_definitions));
|
||||
|
||||
for_array(i, c->info.definitions) {
|
||||
Entity *e = c->info.definitions[i];
|
||||
String name = e->token.string;
|
||||
if (is_blank_ident(name)) {
|
||||
continue;
|
||||
}
|
||||
if ((e->scope->flags & (ScopeFlag_Pkg|ScopeFlag_File)) == 0) {
|
||||
continue;
|
||||
}
|
||||
if (e->parent_proc_decl != nullptr) {
|
||||
continue;
|
||||
}
|
||||
switch (e->kind) {
|
||||
case Entity_Builtin:
|
||||
case Entity_Nil:
|
||||
case Entity_Label:
|
||||
continue;
|
||||
}
|
||||
if (e->pkg == nullptr) {
|
||||
continue;
|
||||
}
|
||||
if (e->token.pos.line == 0) {
|
||||
continue;
|
||||
}
|
||||
if (e->kind == Entity_Procedure) {
|
||||
Type *t = base_type(e->type);
|
||||
if (t->kind != Type_Proc) {
|
||||
continue;
|
||||
}
|
||||
if (t->Proc.is_poly_specialized) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (e->kind == Entity_TypeName) {
|
||||
Type *t = base_type(e->type);
|
||||
if (t->kind == Type_Struct) {
|
||||
if (t->Struct.is_poly_specialized) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (t->kind == Type_Union) {
|
||||
if (t->Union.is_poly_specialized) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
array_add(&sorted_definitions, e);
|
||||
}
|
||||
|
||||
gb_sort_array(sorted_definitions.data, sorted_definitions.count, query_data_definition_compare);
|
||||
definitions->reserve(sorted_definitions.count);
|
||||
|
||||
for_array(i, sorted_definitions) {
|
||||
Entity *e = sorted_definitions[i];
|
||||
String name = e->token.string;
|
||||
|
||||
auto *def = query_value_map();
|
||||
def->reserve(16);
|
||||
definitions->add(def);
|
||||
|
||||
def->add("package", e->pkg->name);
|
||||
def->add("name", name);
|
||||
def->add("filepath", e->token.pos.file);
|
||||
def->add("line", e->token.pos.line);
|
||||
def->add("column", e->token.pos.column);
|
||||
def->add("file_offset", e->token.pos.offset);
|
||||
|
||||
switch (e->kind) {
|
||||
case Entity_Constant: def->add("kind", str_lit("constant")); break;
|
||||
case Entity_Variable: def->add("kind", str_lit("variable")); break;
|
||||
case Entity_TypeName: def->add("kind", str_lit("type name")); break;
|
||||
case Entity_Procedure: def->add("kind", str_lit("procedure")); break;
|
||||
case Entity_ProcGroup: def->add("kind", str_lit("procedure group")); break;
|
||||
case Entity_ImportName: def->add("kind", str_lit("import name")); break;
|
||||
case Entity_LibraryName: def->add("kind", str_lit("library name")); break;
|
||||
default: GB_PANIC("Invalid entity kind to be added");
|
||||
}
|
||||
|
||||
|
||||
if (e->type != nullptr && e->type != t_invalid) {
|
||||
Type *t = e->type;
|
||||
Type *bt = t;
|
||||
|
||||
switch (e->kind) {
|
||||
case Entity_TypeName:
|
||||
if (!e->TypeName.is_type_alias) {
|
||||
bt = base_type(t);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
{
|
||||
gbString str = type_to_string(t);
|
||||
String type_str = make_string(cast(u8 *)str, gb_string_length(str));
|
||||
def->add("type", type_str);
|
||||
}
|
||||
if (t != bt) {
|
||||
gbString str = type_to_string(bt);
|
||||
String type_str = make_string(cast(u8 *)str, gb_string_length(str));
|
||||
def->add("base_type", type_str);
|
||||
}
|
||||
{
|
||||
String type_kind = {};
|
||||
Type *bt = base_type(t);
|
||||
switch (bt->kind) {
|
||||
case Type_Pointer: type_kind = str_lit("pointer"); break;
|
||||
case Type_Opaque: type_kind = str_lit("opaque"); break;
|
||||
case Type_Array: type_kind = str_lit("array"); break;
|
||||
case Type_Slice: type_kind = str_lit("slice"); break;
|
||||
case Type_DynamicArray: type_kind = str_lit("dynamic array"); break;
|
||||
case Type_Map: type_kind = str_lit("map"); break;
|
||||
case Type_Struct: type_kind = str_lit("struct"); break;
|
||||
case Type_Union: type_kind = str_lit("union"); break;
|
||||
case Type_Enum: type_kind = str_lit("enum"); break;
|
||||
case Type_Proc: type_kind = str_lit("procedure"); break;
|
||||
case Type_BitField: type_kind = str_lit("bit field"); break;
|
||||
case Type_BitSet: type_kind = str_lit("bit set"); break;
|
||||
case Type_SimdVector: type_kind = str_lit("simd vector"); break;
|
||||
|
||||
case Type_Generic:
|
||||
case Type_Tuple:
|
||||
case Type_BitFieldValue:
|
||||
GB_PANIC("Invalid definition type");
|
||||
break;
|
||||
}
|
||||
if (type_kind.len > 0) {
|
||||
def->add("type_kind", type_kind);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (e->kind == Entity_TypeName) {
|
||||
def->add("size", type_size_of(e->type));
|
||||
def->add("align", type_align_of(e->type));
|
||||
|
||||
|
||||
if (is_type_struct(e->type)) {
|
||||
auto *data = query_value_map();
|
||||
data->reserve(6);
|
||||
|
||||
def->add("data", data);
|
||||
|
||||
Type *t = base_type(e->type);
|
||||
GB_ASSERT(t->kind == Type_Struct);
|
||||
|
||||
if (t->Struct.is_polymorphic) {
|
||||
data->add("polymorphic", t->Struct.is_polymorphic);
|
||||
}
|
||||
if (t->Struct.is_poly_specialized) {
|
||||
data->add("polymorphic_specialized", t->Struct.is_poly_specialized);
|
||||
}
|
||||
if (t->Struct.is_packed) {
|
||||
data->add("packed", t->Struct.is_packed);
|
||||
}
|
||||
if (t->Struct.is_raw_union) {
|
||||
data->add("raw_union", t->Struct.is_raw_union);
|
||||
}
|
||||
|
||||
auto *fields = query_value_array();
|
||||
data->add("fields", fields);
|
||||
fields->reserve(t->Struct.fields.count);
|
||||
fields->packed = true;
|
||||
|
||||
for_array(j, t->Struct.fields) {
|
||||
Entity *e = t->Struct.fields[j];
|
||||
String name = e->token.string;
|
||||
if (is_blank_ident(name)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
fields->add(name);
|
||||
}
|
||||
} else if (is_type_union(e->type)) {
|
||||
auto *data = query_value_map();
|
||||
data->reserve(4);
|
||||
|
||||
def->add("data", data);
|
||||
Type *t = base_type(e->type);
|
||||
GB_ASSERT(t->kind == Type_Union);
|
||||
|
||||
if (t->Union.is_polymorphic) {
|
||||
data->add("polymorphic", t->Union.is_polymorphic);
|
||||
}
|
||||
if (t->Union.is_poly_specialized) {
|
||||
data->add("polymorphic_specialized", t->Union.is_poly_specialized);
|
||||
}
|
||||
|
||||
auto *variants = query_value_array();
|
||||
variants->reserve(t->Union.variants.count);
|
||||
data->add("variants", variants);
|
||||
|
||||
for_array(j, t->Union.variants) {
|
||||
Type *vt = t->Union.variants[j];
|
||||
|
||||
gbString str = type_to_string(vt);
|
||||
String type_str = make_string(cast(u8 *)str, gb_string_length(str));
|
||||
variants->add(type_str);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (e->kind == Entity_Procedure) {
|
||||
Type *t = base_type(e->type);
|
||||
GB_ASSERT(t->kind == Type_Proc);
|
||||
|
||||
bool is_polymorphic = t->Proc.is_polymorphic;
|
||||
bool is_poly_specialized = t->Proc.is_poly_specialized;
|
||||
bool ok = is_polymorphic || is_poly_specialized;
|
||||
if (ok) {
|
||||
auto *data = query_value_map();
|
||||
data->reserve(4);
|
||||
|
||||
def->add("data", data);
|
||||
if (is_polymorphic) {
|
||||
data->add("polymorphic", is_polymorphic);
|
||||
}
|
||||
if (is_poly_specialized) {
|
||||
data->add("polymorphic_specialized", is_poly_specialized);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (e->kind == Entity_ProcGroup) {
|
||||
auto *procedures = query_value_array();
|
||||
procedures->reserve(e->ProcGroup.entities.count);
|
||||
|
||||
for_array(j, e->ProcGroup.entities) {
|
||||
Entity *p = e->ProcGroup.entities[j];
|
||||
|
||||
auto *procedure = query_value_map();
|
||||
procedure->reserve(2);
|
||||
procedure->packed = true;
|
||||
|
||||
procedures->add(procedure);
|
||||
|
||||
procedure->add("package", p->pkg->name);
|
||||
procedure->add("name", p->token.string);
|
||||
}
|
||||
def->add("procedures", procedures);
|
||||
}
|
||||
|
||||
DeclInfo *di = e->decl_info;
|
||||
if (di != nullptr) {
|
||||
if (di->is_using) {
|
||||
def->add("using", query_value_boolean(true));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (build_context.show_timings) {
|
||||
Timings *t = timings;
|
||||
timings__stop_current_section(t);
|
||||
t->total.finish = time_stamp_time_now();
|
||||
isize max_len = gb_min(36, t->total.label.len);
|
||||
for_array(i, t->sections) {
|
||||
TimeStamp ts = t->sections[i];
|
||||
max_len = gb_max(max_len, ts.label.len);
|
||||
}
|
||||
t->total_time_seconds = time_stamp_as_s(t->total, t->freq);
|
||||
|
||||
auto *tims = query_value_map();
|
||||
tims->reserve(8);
|
||||
root->add("timings", tims);
|
||||
tims->add("time_unit", str_lit("s"));
|
||||
|
||||
tims->add(t->total.label, cast(f64)t->total_time_seconds);
|
||||
|
||||
|
||||
Parser *p = c->parser;
|
||||
if (p != nullptr) {
|
||||
isize lines = p->total_line_count;
|
||||
isize tokens = p->total_token_count;
|
||||
isize files = 0;
|
||||
isize packages = p->packages.count;
|
||||
isize total_file_size = 0;
|
||||
for_array(i, p->packages) {
|
||||
files += p->packages[i]->files.count;
|
||||
for_array(j, p->packages[i]->files) {
|
||||
AstFile *file = p->packages[i]->files[j];
|
||||
total_file_size += file->tokenizer.end - file->tokenizer.start;
|
||||
}
|
||||
}
|
||||
|
||||
tims->add("total_lines", lines);
|
||||
tims->add("total_tokens", tokens);
|
||||
tims->add("total_files", files);
|
||||
tims->add("total_packages", packages);
|
||||
tims->add("total_file_size", total_file_size);
|
||||
|
||||
auto *sections = query_value_map();
|
||||
sections->reserve(t->sections.count);
|
||||
tims->add("sections", sections);
|
||||
for_array(i, t->sections) {
|
||||
TimeStamp ts = t->sections[i];
|
||||
f64 section_time = time_stamp_as_s(ts, t->freq);
|
||||
|
||||
auto *section = query_value_map();
|
||||
section->reserve(2);
|
||||
sections->add(ts.label, section);
|
||||
section->add("time", section_time);
|
||||
section->add("total_fraction", section_time/t->total_time_seconds);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
print_query_data_as_json(root, !build_context.query_data_set_settings.compact);
|
||||
gb_printf("\n");
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Relocatable, offset-based description of an array of T stored inside a flat
// serialized buffer (the OGTD file). Resolved into a live Array<T> view with
// binary_array_from_data().
template <typename T>
struct BinaryArray {
	u32 offset; // Offset in bytes from the top of the file
	u32 length; // Number of elements in array of type T
};
template <typename T>
|
||||
Array<T> binary_array_from_data(BinaryArray<T> ba, void *data) {
|
||||
Array<T> res = {};
|
||||
res.data = cast(T *)(cast(u8 *)data + ba.offset);
|
||||
res.count = ba.length;
|
||||
res.capacity = ba.length;
|
||||
return res;
|
||||
}
|
||||
|
||||
// A string serialized into the flat buffer as raw bytes (length excludes any
// trailing NUL the writer appends).
typedef BinaryArray<u8> BinaryString;
// One identifier *use* site paired with the location of its definition.
struct GoToDefIdent {
	u64 use_offset;  // offset of identifier use in bytes from the start of the file that contains it
	u32 len;         // length in bytes of the identifier
	u32 def_file_id; // id of the file containing the definition (matches GoToDefFile.id)
	u64 def_offset;  // offset of entity definition in bytes from the start of the file that contains it
};
// Per-file record in the OGTD output.
struct GoToDefFile {
	u32 id;                           // unique file id (taken from AstFile's id field)
	BinaryString path;                // full path; writer appends a NUL byte not counted in `length`
	BinaryArray<GoToDefIdent> idents; // identifier uses in this file, sorted by use offset
};
// File header for the OGTD binary format; written first in the output buffer.
struct GoToDefHeader {
	u8 magic[4]; // ogtd (odin-go-to-definitions)
	u32 version; // 1
	BinaryArray<GoToDefFile> files; // table of per-file records
};
// Transient in-memory grouping used while building the OGTD output:
// collects the identifier-use AST nodes whose use site lies in one file.
struct GoToDefFileMap {
	AstFile *f;
	u32 id;              // NOTE(review): appears unused — f's own id is read instead; confirm
	Array<Ast *> idents; // Ast_Ident nodes used within `f`
};
int go_to_def_file_map_compare(void const *a, void const *b) {
|
||||
GoToDefFileMap const *x = cast(GoToDefFileMap const *)a;
|
||||
GoToDefFileMap const *y = cast(GoToDefFileMap const *)b;
|
||||
if (x == y) {
|
||||
return 0;
|
||||
} else if (x != nullptr && y == nullptr) {
|
||||
return -1;
|
||||
} else if (x == nullptr && y != nullptr) {
|
||||
return +1;
|
||||
}
|
||||
if (x->f->id < y->f->id) {
|
||||
return -1;
|
||||
} else if (x->f->id > y->f->id) {
|
||||
return +1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int quick_ident_compare(void const *a, void const *b) {
|
||||
Ast *x = *cast(Ast **)a;
|
||||
Ast *y = *cast(Ast **)b;
|
||||
|
||||
// NOTE(bill): This assumes that the file is same
|
||||
if (x->Ident.token.pos.offset < y->Ident.token.pos.offset) {
|
||||
return -1;
|
||||
} else if (x->Ident.token.pos.offset > y->Ident.token.pos.offset) {
|
||||
return +1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
// Serializes the checker's recorded identifier uses into the OGTD
// (odin-go-to-definitions) binary format and writes it to stdout.
// Layout: GoToDefHeader, then the GoToDefFile table, then all NUL-terminated
// file paths, then all GoToDefIdent records, each section 8-byte aligned.
// Requires the checker to have been run with identifier-use tracking enabled.
void generate_and_print_query_data_go_to_definitions(Checker *c) {
	GB_ASSERT(c->info.allow_identifier_uses);

	gbAllocator a = query_value_allocator;

	// Pass 1: one GoToDefFileMap per checked file; also total the path bytes
	// so the serialized size can be computed up front.
	isize file_path_memory_needed = 0;
	auto files = array_make<GoToDefFileMap>(a, 0, c->info.files.entries.count);
	for_array(i, c->info.files.entries) {
		AstFile *f = c->info.files.entries[i].value;
		file_path_memory_needed += f->fullpath.len+1; // add NUL terminator

		GoToDefFileMap x = {};
		x.f = f;
		array_init(&x.idents, a);
		array_add(&files, x);
	}
	gb_sort_array(files.data, files.count, go_to_def_file_map_compare);

	// Dense lookup table: file id -> index into `files` (-1 when absent).
	// Sized from the largest id, which is last after the sort above.
	// NOTE(review): assumes at least one checked file — files.count-1 would
	// underflow on an empty set; confirm callers guarantee this.
	auto file_id_map_to_index = array_make<isize>(a, files[files.count-1].f->id + 1);
	for_array(i, file_id_map_to_index) {
		file_id_map_to_index[i] = -1;
	}
	for_array(i, files) {
		file_id_map_to_index[files[i].f->id] = i;
	}

	// Pass 2: bucket every recorded identifier use into the file that
	// contains the use site, skipping uses that cannot be resolved to a
	// definition worth emitting.
	for_array(i, c->info.identifier_uses) {
		Ast *ast = c->info.identifier_uses[i];
		GB_ASSERT(ast->kind == Ast_Ident);
		TokenPos pos = ast->Ident.token.pos;
		Entity *e = ast->Ident.entity;
		if (e == nullptr) {
			// Identifier never resolved to an entity — nothing to point at.
			continue;
		}

		AstFile **use_file_found = map_get(&c->info.files, hash_string(pos.file));
		GB_ASSERT(use_file_found != nullptr);
		AstFile *use_file = *use_file_found;
		GB_ASSERT(use_file != nullptr);

		if (e->scope == nullptr) {
			// Only field entities are expected to lack a scope; skip them.
			GB_ASSERT(e->flags & EntityFlag_Field);
			continue;
		}
		if (e->scope->flags & ScopeFlag_Global) {
			// Built-in/global entities have no source definition to jump to.
			continue;
		}

		isize idx = file_id_map_to_index[use_file->id];
		if (idx >= 0) {
			array_add(&files[idx].idents, ast);
		} else {
			// TODO(bill): Handle invalid map case?
		}
	}

	// Within each file, order uses by byte offset for the output format.
	for_array(i, files) {
		GoToDefFileMap *f = &files[i];
		gb_sort_array(f->idents.data, f->idents.count, quick_ident_compare);
		// gb_printf_err("%lld %.*s -> %lld\n", f->f->id, LIT(f->f->fullpath), f->idents.count);
	}

	// Compute the section offsets of the serialized layout:
	// header | file table | file paths | ident records, each 8-byte aligned.
	isize data_min_size = 0;

	// NOTE(review): header_offset is computed but never read below (the
	// header always starts at 0); looks vestigial — confirm before removing.
	u32 header_offset = cast(u32)data_min_size;
	data_min_size += gb_size_of(GoToDefHeader);
	data_min_size = align_formula_isize(data_min_size, 8);

	u32 file_offset = cast(u32)data_min_size;
	data_min_size += gb_size_of(GoToDefFile) * files.count;
	data_min_size = align_formula_isize(data_min_size, 8);

	u32 file_path_offset = cast(u32)data_min_size;
	data_min_size += file_path_memory_needed;
	data_min_size = align_formula_isize(data_min_size, 8);

	u32 idents_offset = cast(u32)data_min_size;
	// Sized by total identifier uses, an upper bound on what gets written
	// (some uses were skipped above), so the buffer can only over-allocate.
	data_min_size += gb_size_of(GoToDefIdent) * c->info.identifier_uses.count;

	auto data = array_make<u8>(a, 0, data_min_size);
	defer (array_free(&data));

	GoToDefHeader header = {};
	gb_memmove(header.magic, "ogtd", 4);
	header.version = 1;
	header.files.length = cast(u32)files.count;
	header.files.offset = file_offset;

	// Copy the header in, then grow to the full size so the section views
	// below can be written through in place.
	array_add_elems(&data, cast(u8 *)&header, gb_size_of(header));

	array_resize(&data, data_min_size);

	auto binary_files = binary_array_from_data(header.files, data.data);

	// Running cursors into the path and ident sections as files are emitted.
	u32 file_path_offset_index = file_path_offset;
	u32 idents_offset_index = idents_offset;
	for_array(i, files) {
		GoToDefFileMap *f_map = &files[i];
		AstFile *f = f_map->f;
		binary_files[i].id = cast(u32)f->id;

		binary_files[i].path.offset = file_path_offset_index;
		binary_files[i].path.length = cast(u32)f->fullpath.len;

		binary_files[i].idents.offset = idents_offset_index;
		binary_files[i].idents.length = cast(u32)f_map->idents.count;

		// Write the path, NUL-terminated (the NUL is not counted in length).
		auto path = binary_array_from_data(binary_files[i].path, data.data);
		gb_memmove(path.data, f->fullpath.text, f->fullpath.len);
		path.data[f->fullpath.len] = 0;

		auto idents = binary_array_from_data(binary_files[i].idents, data.data);
		for_array(j, f_map->idents) {
			Ast *ast = f_map->idents[j];
			GB_ASSERT(ast->kind == Ast_Ident);

			Entity *e = ast->Ident.entity;
			// NOTE(review): `def` is unused — e->token.pos is read directly
			// below; confirm before removing.
			TokenPos def = e->token.pos;
			AstFile *def_file = e->file;

			if (def_file == nullptr) {
				// Fall back to resolving the defining file by path.
				auto *def_file_found = map_get(&c->info.files, hash_string(e->token.pos.file));
				if (def_file_found == nullptr) {
					// NOTE(review): skipping here leaves idents[j] holding
					// whatever array_resize left in the buffer, yet the
					// record stays inside this file's declared ident range —
					// readers may see a garbage entry; confirm intended.
					continue;
				}
				def_file = *def_file_found;
			}

			isize file_index = file_id_map_to_index[def_file->id];
			GB_ASSERT(file_index >= 0);

			idents[j].use_offset = cast(u64)ast->Ident.token.pos.offset;
			idents[j].len = cast(u32)ast->Ident.token.string.len;
			idents[j].def_file_id = cast(u32)def_file->id;
			idents[j].def_offset = cast(u64)e->token.pos.offset;

			// gb_printf_err("%llu %llu %llu %llu\n", idents[j].len, idents[j].use_offset, idents[j].def_file_id, idents[j].def_offset);
		}

		// Advance the section cursors past what this file consumed.
		file_path_offset_index += cast(u32)(f->fullpath.len + 1);
		idents_offset_index += cast(u32)(f_map->idents.count * gb_size_of(GoToDefIdent));
	}

	// Emit the whole buffer to stdout.
	gb_file_write(gb_file_get_standard(gbFileStandard_Output), data.data, data.count*gb_size_of(*data.data));
}
|
||||
|
||||
Reference in New Issue
Block a user