diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 74f542ca..54d4d787 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -88,6 +88,10 @@ jobs: - name: secrets-scanner type: go path: PROJECTS/intermediate/secrets-scanner + # Nim + - name: credential-enumeration + type: nim + path: PROJECTS/intermediate/credential-enumeration defaults: run: @@ -147,6 +151,17 @@ jobs: if: matrix.type == 'go' run: go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@latest + # Nim Setup + - name: Setup Nim + if: matrix.type == 'nim' + uses: jiro4989/setup-nim-action@v2 + with: + nim-version: '2.2.x' + + - name: Install nph + if: matrix.type == 'nim' + run: nimble install -y nph + # Ruff Linting - name: Run ruff if: matrix.type == 'ruff' @@ -195,6 +210,37 @@ jobs: cat golangci-output.txt continue-on-error: true + # Nim Linting + - name: Run nph and nim check + if: matrix.type == 'nim' + id: nim + run: | + echo "Running nph format check..." + NPH_OK=true + NIM_OK=true + if nph --check src/ > nim-output.txt 2>&1; then + echo "nph: no formatting issues" + else + NPH_OK=false + echo "nph: formatting issues found" + fi + echo "Running nim check..." + if nim check src/harvester.nim >> nim-output.txt 2>&1; then + echo "nim check: passed" + else + NIM_OK=false + echo "nim check: failed" + fi + if [[ "$NPH_OK" == "true" && "$NIM_OK" == "true" ]]; then + echo "NIM_PASSED=true" >> $GITHUB_ENV + echo "All Nim checks passed!" 
+ else + echo "NIM_PASSED=false" >> $GITHUB_ENV + echo "Nim checks found issues" + fi + cat nim-output.txt + continue-on-error: true + # Create Summary for Ruff - name: Create Ruff Lint Summary if: matrix.type == 'ruff' @@ -288,6 +334,37 @@ jobs: fi } >> $GITHUB_STEP_SUMMARY + # Create Summary for Nim + - name: Create Nim Lint Summary + if: matrix.type == 'nim' + run: | + { + echo "## Lint Results: ${{ matrix.name }}" + echo '' + + if [[ "${{ env.NIM_PASSED }}" == "true" ]]; then + echo '### nph + nim check: **Passed**' + echo 'No Nim issues found.' + else + echo '### nph + nim check: **Issues Found**' + echo '
View Nim output' + echo '' + echo '```' + head -100 nim-output.txt + echo '```' + echo '
' + fi + echo '' + + if [[ "${{ env.NIM_PASSED }}" == "true" ]]; then + echo '---' + echo '### All checks passed!' + else + echo '---' + echo '### Review the issues above' + fi + } >> $GITHUB_STEP_SUMMARY + # Exit with proper status - name: Check lint status run: | @@ -306,5 +383,10 @@ jobs: echo "Go lint checks failed" exit 1 fi + elif [[ "${{ matrix.type }}" == "nim" ]]; then + if [[ "${{ env.NIM_PASSED }}" == "false" ]]; then + echo "Nim lint checks failed" + exit 1 + fi fi echo "All lint checks passed" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 40e33a8a..98df1251 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -150,6 +150,16 @@ repos: files: ^PROJECTS/advanced/encrypted-p2p-chat/frontend/src/ pass_filenames: false + # Nim nph Checks + - repo: local + hooks: + - id: nph-credential-enumeration + name: nph check (credential-enumeration) + entry: bash -c 'cd PROJECTS/intermediate/credential-enumeration && nph --check src/' + language: system + files: ^PROJECTS/intermediate/credential-enumeration/src/ + pass_filenames: false + - repo: https://github.com/pre-commit/pre-commit-hooks rev: v6.0.0 hooks: diff --git a/PROJECTS/beginner/c2-beacon/backend/README.md b/PROJECTS/beginner/c2-beacon/backend/README.md deleted file mode 100644 index e69de29b..00000000 diff --git a/PROJECTS/intermediate/credential-enumeration/.gitignore b/PROJECTS/intermediate/credential-enumeration/.gitignore new file mode 100644 index 00000000..b428db43 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/.gitignore @@ -0,0 +1,10 @@ +# ©AngelaMos | 2026 +# .gitignore + +docs/ +bin/ +nimcache/ +credenum +*.exe +*.out +tests/docker/planted/ diff --git a/PROJECTS/intermediate/credential-enumeration/Justfile b/PROJECTS/intermediate/credential-enumeration/Justfile new file mode 100644 index 00000000..efd85b2d --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/Justfile @@ -0,0 +1,124 @@ +# 
============================================================================= +# ©AngelaMos | 2026 +# Justfile +# ============================================================================= +# credenum — Post-access credential exposure detection for Linux systems +# ============================================================================= + +set export +set shell := ["bash", "-uc"] + +bin := "bin/credenum" +src := "src/harvester.nim" +version := `git describe --tags --always 2>/dev/null || echo "dev"` + +# ============================================================================= +# Default +# ============================================================================= + +default: + @just --list --unsorted + +# ============================================================================= +# Development +# ============================================================================= + +[group('dev')] +build: + @mkdir -p bin + nim c -o:{{bin}} {{src}} + @echo "Built: {{bin}} ($(du -h {{bin}} | cut -f1))" + +[group('dev')] +run *ARGS: build + ./{{bin}} {{ARGS}} + +[group('dev')] +scan *ARGS: build + ./{{bin}} --target $HOME {{ARGS}} + +[group('dev')] +check: + nim check {{src}} + +# ============================================================================= +# Build (Production) +# ============================================================================= + +[group('prod')] +release: + @mkdir -p bin + nim c -d:release -d:lto --opt:size -o:{{bin}} {{src}} + strip -s {{bin}} 2>/dev/null || true + @echo "Release: {{bin}} ($(du -h {{bin}} | cut -f1))" + +[group('prod')] +release-static: + @mkdir -p bin + nim c -d:release -d:musl -d:lto --opt:size -o:{{bin}} {{src}} + strip -s {{bin}} 2>/dev/null || true + @echo "Static release: {{bin}} ($(du -h {{bin}} | cut -f1))" + +[group('prod')] +release-small: release-static + upx --best {{bin}} + @echo "Compressed: {{bin}} ($(du -h {{bin}} | cut -f1))" + +[group('prod')] +build-x86: + @mkdir -p bin + nim c -d:release 
-d:zigcc -d:crossX86 -d:lto --opt:size -o:bin/credenum-x86_64 {{src}} + @echo "Cross-compiled: bin/credenum-x86_64" + +[group('prod')] +build-arm64: + @mkdir -p bin + nim c -d:release -d:zigcc -d:crossArm64 -d:lto --opt:size -o:bin/credenum-aarch64 {{src}} + @echo "Cross-compiled: bin/credenum-aarch64" + +# ============================================================================= +# Testing +# ============================================================================= + +[group('test')] +test: + nim c -r --path:src tests/test_all.nim + +[group('test')] +docker-build: + docker build -t credenum-test -f tests/docker/Dockerfile . + +[group('test')] +docker-test: docker-build + docker run --rm credenum-test + +# ============================================================================= +# Formatting +# ============================================================================= + +[group('lint')] +fmt: + nph src/ + +[group('lint')] +fmt-check: + nph --check src/ + +# ============================================================================= +# Utilities +# ============================================================================= + +[group('util')] +info: + @echo "Project: credential-enumeration" + @echo "Version: {{version}}" + @echo "Nim: $(nim --version | head -1)" + @echo "OS: {{os()}} ({{arch()}})" + @echo "Binary: {{bin}}" + @test -f {{bin}} && echo "Size: $(du -h {{bin}} | cut -f1)" || echo "Size: (not built)" + +[group('util')] +clean: + -rm -rf bin/ nimcache/ + -find . -name "nimcache" -type d -exec rm -rf {} + 2>/dev/null + @echo "Cleaned build artifacts." diff --git a/PROJECTS/intermediate/credential-enumeration/LICENSE b/PROJECTS/intermediate/credential-enumeration/LICENSE new file mode 100644 index 00000000..0ad25db4 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. 
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. 
+ + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/PROJECTS/intermediate/credential-enumeration/README.md b/PROJECTS/intermediate/credential-enumeration/README.md new file mode 100644 index 00000000..7a3f761c --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/README.md @@ -0,0 +1,61 @@ +```ruby + ██████╗██████╗ ███████╗██████╗ ███████╗███╗ ██╗██╗ ██╗███╗ ███╗ +██╔════╝██╔══██╗██╔════╝██╔══██╗██╔════╝████╗ ██║██║ ██║████╗ ████║ +██║ ██████╔╝█████╗ ██║ ██║█████╗ ██╔██╗ ██║██║ ██║██╔████╔██║ +██║ ██╔══██╗██╔══╝ ██║ ██║██╔══╝ ██║╚██╗██║██║ ██║██║╚██╔╝██║ +╚██████╗██║ ██║███████╗██████╔╝███████╗██║ ╚████║╚██████╔╝██║ ╚═╝ ██║ + ╚═════╝╚═╝ ╚═╝╚══════╝╚═════╝ ╚══════╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝ ╚═╝ +``` + +[![Cybersecurity Projects](https://img.shields.io/badge/Cybersecurity--Projects-Project%20%2321%20intermediate-red?style=flat&logo=github)](https://github.com/CarterPerez-dev/Cybersecurity-Projects/tree/main/PROJECTS/intermediate/credential-enumeration) +[![Nim](https://img.shields.io/badge/Nim-2.2+-FFE953?style=flat&logo=nim&logoColor=black)](https://nim-lang.org) +[![License: AGPLv3](https://img.shields.io/badge/License-AGPL_v3-purple.svg)](https://www.gnu.org/licenses/agpl-3.0) +[![MITRE ATT&CK](https://img.shields.io/badge/MITRE_ATT%26CK-T1552-orange?style=flat)](https://attack.mitre.org/techniques/T1552/) + +> Post-access credential exposure detection for Linux systems, written in Nim. + +*This is a quick overview. 
Security theory, architecture, and full walkthroughs are in the [learn modules](#learn).* + +## What It Does + +- Scans Linux home directories for exposed credentials across 7 categories +- Detects unprotected SSH keys, plaintext cloud credentials, browser credential stores, shell history secrets, keyrings, Git tokens, and application credentials +- Classifies findings by severity based on file permissions and exposure risk +- Reports in terminal with color-coded output or structured JSON for automation +- Compiles to a single static binary with zero runtime dependencies + +## Quick Start + +```bash +bash install.sh +credenum +``` + +> [!TIP] +> This project uses [`just`](https://github.com/casey/just) as a command runner. Type `just` to see all available commands. +> +> Install: `curl -sSf https://just.systems/install.sh | bash -s -- --to ~/.local/bin` + +## Stack + +**Language:** Nim 2.2+ (ORC memory management) + +**Build:** Just, Nimble, musl (static linking), UPX (compression), zigcc (cross-compilation) + +**Testing:** Nim unittest, Docker (integration tests with planted credentials) + +## Learn + +This project includes step-by-step learning materials covering security theory, architecture, and implementation. 
+ +| Module | Topic | +|--------|-------| +| [00 - Overview](learn/00-OVERVIEW.md) | Prerequisites and quick start | +| [01 - Concepts](learn/01-CONCEPTS.md) | Security theory and real-world breaches | +| [02 - Architecture](learn/02-ARCHITECTURE.md) | System design and data flow | +| [03 - Implementation](learn/03-IMPLEMENTATION.md) | Code walkthrough | +| [04 - Challenges](learn/04-CHALLENGES.md) | Extension ideas and exercises | + +## License + +AGPL 3.0 diff --git a/PROJECTS/intermediate/credential-enumeration/config.nims b/PROJECTS/intermediate/credential-enumeration/config.nims new file mode 100644 index 00000000..38965438 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/config.nims @@ -0,0 +1,36 @@ +# ©AngelaMos | 2026 +# config.nims + +switch("mm", "orc") + +when defined(musl): + var muslGcc = findExe("musl-gcc") + if muslGcc.len > 0: + switch("gcc.exe", muslGcc) + switch("gcc.linkerexe", muslGcc) + switch("passL", "-static") + +when defined(zigcc): + switch("cc", "clang") + switch("clang.exe", "zigcc") + switch("clang.linkerexe", "zigcc") + +when defined(release): + switch("opt", "size") + switch("passC", "-flto") + switch("passL", "-flto") + +when defined(strip): + switch("passL", "-s") + +when defined(crossX86): + switch("passC", "-target x86_64-linux-musl") + switch("passL", "-target x86_64-linux-musl") + switch("os", "linux") + switch("cpu", "amd64") + +when defined(crossArm64): + switch("passC", "-target aarch64-linux-musl") + switch("passL", "-target aarch64-linux-musl") + switch("os", "linux") + switch("cpu", "arm64") diff --git a/PROJECTS/intermediate/credential-enumeration/credential-enumeration.nimble b/PROJECTS/intermediate/credential-enumeration/credential-enumeration.nimble new file mode 100644 index 00000000..1decace5 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/credential-enumeration.nimble @@ -0,0 +1,13 @@ +# ©AngelaMos | 2026 +# credential-enumeration.nimble + +version = "0.1.0" +author = "AngelaMos" 
+description = "Post-access credential exposure detection for Linux systems" +license = "AGPL-3.0" +srcDir = "src" +binDir = "bin" +bin = @["credenum"] +namedBin = {"harvester": "credenum"}.toTable + +requires "nim >= 2.2.0" diff --git a/PROJECTS/intermediate/credential-enumeration/install.sh b/PROJECTS/intermediate/credential-enumeration/install.sh new file mode 100755 index 00000000..10c3c8b2 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/install.sh @@ -0,0 +1,178 @@ +#!/usr/bin/env bash +# ©AngelaMos | 2026 +# install.sh + +set -euo pipefail + +REPO_OWNER="CarterPerez-dev" +REPO_NAME="credential-enumeration" +BINARY="credenum" +INSTALL_DIR="${CREDENUM_INSTALL_DIR:-$HOME/.credenum/bin}" +VERSION="${CREDENUM_VERSION:-}" +MIN_NIM_MAJOR=2 +MIN_NIM_MINOR=2 + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +CYAN='\033[0;36m' +MAGENTA='\033[0;35m' +BOLD='\033[1m' +DIM='\033[2m' +NC='\033[0m' + +info() { echo -e " ${GREEN}+${NC} $1"; } +warn() { echo -e " ${YELLOW}!${NC} $1"; } +fail() { echo -e " ${RED}x${NC} $1"; exit 1; } +header() { echo -e "\n${BOLD}${CYAN}--- $1 ---${NC}\n"; } + +TMP_DIR="" +cleanup() { [[ -n "$TMP_DIR" ]] && rm -rf "$TMP_DIR"; } +trap cleanup EXIT + +echo -e "${BOLD}" +echo -e " ${RED} ▄▀▀ █▀▄ ██▀ █▀▄ ██▀ █▄ █ █ █ █▄▄▀▄${NC}" +echo -e " ${CYAN} ▀▄▄ █▀▄ █▄▄ █▄▀ █▄▄ █ ▀█ ▀▄█ █ ▀▄${NC}" +echo -e "${NC}" +echo -e " ${DIM}Post-access credential exposure detection for Linux${NC}" + +header "Detecting system" + +OS="$(uname -s)" +ARCH="$(uname -m)" + +case "$OS" in + Linux) OS="linux" ;; + Darwin) OS="darwin" ;; + MINGW*|MSYS*|CYGWIN*) fail "Windows is not supported. This tool targets Linux credential stores." ;; + *) fail "Unsupported OS: $OS" ;; +esac + +case "$ARCH" in + x86_64|amd64) ARCH="amd64" ;; + aarch64|arm64) ARCH="arm64" ;; + *) fail "Unsupported architecture: $ARCH" ;; +esac + +info "System: ${OS}/${ARCH}" + +header "Checking Nim" + +if ! command -v nim &>/dev/null; then + fail "Nim is not installed. 
+ + Install via choosenim: + curl https://nim-lang.org/choosenim/init.sh -sSf | sh + + Or visit: https://nim-lang.org/install.html" +fi + +NIM_VER=$(nim --version | head -1 | grep -oP '\d+\.\d+\.\d+') +NIM_MAJOR=$(echo "$NIM_VER" | cut -d. -f1) +NIM_MINOR=$(echo "$NIM_VER" | cut -d. -f2) + +if [[ "$NIM_MAJOR" -lt "$MIN_NIM_MAJOR" ]] || { [[ "$NIM_MAJOR" -eq "$MIN_NIM_MAJOR" ]] && [[ "$NIM_MINOR" -lt "$MIN_NIM_MINOR" ]]; }; then + fail "Nim ${MIN_NIM_MAJOR}.${MIN_NIM_MINOR}+ required (found ${NIM_VER}). + Run: choosenim stable" +fi + +info "Nim ${NIM_VER}" + +header "Building from source" + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SRC_DIR="$SCRIPT_DIR" + +if [[ ! -f "$SRC_DIR/credential-enumeration.nimble" ]]; then + fail "Run install.sh from the project root directory." +fi + +info "Compiling credenum..." + +mkdir -p "$SRC_DIR/bin" + +STATIC_FLAGS="" +if command -v musl-gcc &>/dev/null; then + STATIC_FLAGS="-d:musl" + info "Using musl for static binary" +fi + +nim c -d:release ${STATIC_FLAGS} --opt:size -o:"$SRC_DIR/bin/credenum" "$SRC_DIR/src/harvester.nim" +strip -s "$SRC_DIR/bin/credenum" 2>/dev/null || true + +info "Built: bin/credenum ($(du -h "$SRC_DIR/bin/credenum" | cut -f1))" + +header "Installing" + +mkdir -p "$INSTALL_DIR" +cp "$SRC_DIR/bin/credenum" "$INSTALL_DIR/" +chmod +x "$INSTALL_DIR/$BINARY" +info "Installed to ${INSTALL_DIR}/${BINARY}" + +header "Configuring PATH" + +PATH_UPDATED=false + +case ":$PATH:" in + *":${INSTALL_DIR}:"*) + info "${INSTALL_DIR} already in PATH" + PATH_UPDATED=true + ;; +esac + +if [[ "$PATH_UPDATED" == "false" ]]; then + CURRENT_SHELL="$(basename "${SHELL:-/bin/bash}")" + TARGET="" + + case "$CURRENT_SHELL" in + zsh) + [[ -f "$HOME/.zshrc" ]] && TARGET="$HOME/.zshrc" + ;; + bash) + if [[ -f "$HOME/.bashrc" ]]; then + TARGET="$HOME/.bashrc" + elif [[ -f "$HOME/.bash_profile" ]]; then + TARGET="$HOME/.bash_profile" + fi + ;; + fish) + mkdir -p "$HOME/.config/fish/conf.d" + echo "set -gx PATH 
\"$INSTALL_DIR\" \$PATH" > "$HOME/.config/fish/conf.d/credenum.fish" + info "Added to ~/.config/fish/conf.d/credenum.fish" + PATH_UPDATED=true + ;; + esac + + if [[ "$PATH_UPDATED" == "false" && -z "${TARGET:-}" ]]; then + [[ -f "$HOME/.profile" ]] && TARGET="$HOME/.profile" + fi + + if [[ "$PATH_UPDATED" == "false" && -n "${TARGET:-}" ]]; then + if ! grep -q "$INSTALL_DIR" "$TARGET" 2>/dev/null; then + printf '\nexport PATH="%s:$PATH"\n' "$INSTALL_DIR" >> "$TARGET" + info "Added to ${TARGET}" + else + info "Already configured in ${TARGET}" + fi + fi +fi + +echo "" +echo -e " ${GREEN}${BOLD}credenum installed successfully${NC}" +echo "" + +if ! command -v credenum &>/dev/null; then + warn "Restart your shell or run:" + echo -e " ${BOLD}export PATH=\"${INSTALL_DIR}:\$PATH\"${NC}" + echo "" +fi + +echo -e " ${DIM}Quick start:${NC}" +echo "" +echo -e " ${CYAN}credenum${NC} Scan current user" +echo -e " ${CYAN}credenum --format json${NC} JSON output" +echo -e " ${CYAN}credenum --modules ssh,git${NC} Scan specific modules" +echo -e " ${CYAN}credenum --dry-run${NC} Preview scan paths" +echo "" +echo -e " ${DIM}Docs: https://github.com/${REPO_OWNER}/Cybersecurity-Projects${NC}" +echo "" diff --git a/PROJECTS/intermediate/credential-enumeration/learn/00-OVERVIEW.md b/PROJECTS/intermediate/credential-enumeration/learn/00-OVERVIEW.md new file mode 100644 index 00000000..ec546028 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/learn/00-OVERVIEW.md @@ -0,0 +1,165 @@ +# 00-OVERVIEW.md + +# Credential Enumeration + +## What This Is + +A command-line tool that scans Linux home directories for exposed credentials after gaining access to a system. It checks 7 categories of credential storage: SSH keys, browser data, cloud provider configs (AWS/GCP/Azure/Kubernetes), shell history, keyrings, Git credential stores, and application tokens. Each finding is classified by severity based on file permissions and exposure risk. 
Written in Nim and compiled to a single static binary. + +## Why This Matters + +Credentials left in home directories are one of the most reliable footholds attackers find after initial access. The LastPass breach in 2022-2023 traced back to a DevOps engineer's home machine where an attacker found SSH keys and cloud credentials that gave access to production vaults. In the CircleCI incident (January 2023), an engineer's laptop was compromised and session tokens were stolen from browser storage, which gave the attacker access to customer secrets across the platform. The Uber breach in September 2022 started with social engineering but escalated because hardcoded credentials were sitting in PowerShell scripts on internal network shares. + +These aren't exotic attack techniques. MITRE ATT&CK catalogs them as T1552 (Unsecured Credentials) with sub-techniques for credentials in files (T1552.001), bash history (T1552.003), and private keys (T1552.004). Separately, T1555 covers credentials from password stores (T1555.001 for keychains, T1555.003 for web browsers). These techniques appear in nearly every post-access kill chain because they work so often and so reliably. + +This project teaches you what attackers look for, where they look, and how to detect that exposure before it gets exploited. 
+ +**Real world scenarios where this applies:** +- Red team operators mapping available credentials after landing on a Linux workstation +- Blue team defenders auditing developer machines for credential hygiene +- Security engineers building credential exposure into CI/CD compliance checks +- System administrators verifying SSH key permissions across fleet machines +- Incident responders determining what credentials an attacker could have accessed + +## What You'll Learn + +**Security Concepts:** +- MITRE ATT&CK credential access techniques (T1552, T1555) and how they map to real file system locations +- Linux file permission model and why 0644 on an SSH private key is a finding but 0600 is not +- How browsers store credentials (Firefox's logins.json + key4.db, Chromium's Login Data SQLite database) +- Cloud credential storage patterns across AWS, GCP, Azure, and Kubernetes +- Shell history as an intelligence source: leaked secrets in exports, credential-bearing commands, .env files + +**Technical Skills:** +- Building a modular scanner architecture with pluggable collectors in Nim +- Unix file permission inspection using POSIX stat syscalls +- Pattern matching for secret detection across shell history and config files +- Structured severity classification based on permission analysis +- Dual output rendering: colored terminal with box drawing and structured JSON + +**Tools and Techniques:** +- Nim systems programming with ORC memory management and zero-exception guarantees (`{.push raises: [].}`) +- Static binary compilation with musl for deployment without runtime dependencies +- Cross-compilation targeting x86_64 and aarch64 via zigcc +- Docker-based integration testing with planted credential fixtures +- Just as a task runner for build, test, and release workflows + +## Prerequisites + +**Required knowledge:** +- Linux fundamentals: navigating the file system, understanding home directory layout, reading file permissions with `ls -la` +- Basic programming 
concepts: functions, loops, conditionals, data structures. Nim reads like Python with type annotations, so Python experience transfers well +- Security basics: what credentials are, why unprotected credentials are dangerous, what SSH keys do + +**Tools you'll need:** +- Nim 2.2.0+ with nimble package manager +- Docker (for integration tests) +- Just task runner (optional but recommended) +- A Linux system (the tool targets Linux credential stores specifically) + +**Helpful but not required:** +- Familiarity with Nim syntax. If you know Python, you can read Nim. The significant differences are compile-time types and manual memory layout +- Experience with penetration testing or red team operations provides context for why these credential locations matter +- Understanding of cloud provider authentication (AWS IAM, GCP service accounts, Kubernetes RBAC) + +## Quick Start + +```bash +cd PROJECTS/intermediate/credential-enumeration + +bash install.sh + +credenum +``` + +Expected output: A colored terminal report showing findings grouped by module (SSH, browser, cloud, history, keyring, git, apptoken). Each finding shows a severity badge, file path, permissions, and modification timestamp. The summary at the bottom shows total findings by severity. + +To run with JSON output: + +```bash +credenum --format json +``` + +To scan specific modules only: + +```bash +credenum --modules ssh,git,cloud +``` + +To run the integration test suite: + +```bash +just docker-test +``` + +This builds a Docker container with planted credential fixtures across all 7 categories and validates that the scanner detects each one. 
+ +## Project Structure + +``` +credential-enumeration/ +├── src/ +│ ├── harvester.nim # Entry point, CLI parser, main orchestration +│ ├── runner.nim # Routes categories to collectors, aggregates results +│ ├── types.nim # Core data structures (Finding, Report, Severity, etc) +│ ├── config.nim # All constants: paths, patterns, thresholds, colors +│ ├── collectors/ +│ │ ├── base.nim # Shared utilities: file ops, permissions, finding factories +│ │ ├── browser.nim # Firefox profiles + Chromium variants +│ │ ├── ssh.nim # Private keys, config, authorized_keys, known_hosts +│ │ ├── git.nim # .git-credentials, config helpers, GitHub/GitLab tokens +│ │ ├── cloud.nim # AWS, GCP, Azure, Kubernetes credential files +│ │ ├── history.nim # Shell history secrets, command patterns, .env files +│ │ ├── keyring.nim # GNOME Keyring, KDE Wallet, KeePass, pass, Bitwarden +│ │ └── apptoken.nim # Database creds, dev tokens, infra tokens, Docker auth +│ └── output/ +│ ├── terminal.nim # Box-drawn colored terminal renderer +│ └── json.nim # Structured JSON output with metadata +├── tests/ +│ ├── test_all.nim # Unit tests (30+ cases) +│ └── docker/ +│ ├── Dockerfile # Multi-stage: nim builder + ubuntu test runtime +│ ├── validate.sh # Integration test: runs scanner, checks all 7 categories +│ └── planted/ # Credential fixtures (SSH keys, AWS creds, tokens, etc) +├── config.nims # Nim compiler switches (ORC, musl, zigcc, cross-compile) +├── credential-enumeration.nimble # Package manifest +├── Justfile # Build, test, release, format commands +└── install.sh # One-step install: compile + PATH setup +``` + +## Next Steps + +1. **Understand the concepts** - Read [01-CONCEPTS.md](./01-CONCEPTS.md) to learn about credential exposure vectors, Linux file permissions, how browsers and cloud providers store secrets, and real breaches driven by unsecured credentials +2. 
**Study the architecture** - Read [02-ARCHITECTURE.md](./02-ARCHITECTURE.md) to see the collector-based design, how severity classification works, and why the type system is structured the way it is +3. **Walk through the code** - Read [03-IMPLEMENTATION.md](./03-IMPLEMENTATION.md) for detailed breakdowns of the CLI parser, each collector module, the permission analysis logic, and the output renderers +4. **Extend the project** - Read [04-CHALLENGES.md](./04-CHALLENGES.md) for ideas ranging from adding new collectors to building a remediation engine + +## Common Issues + +**Nim version too old** +``` +Error: Nim 2.2+ required (found 1.6.x) +``` +Solution: Update via `choosenim stable` or install from https://nim-lang.org/install.html + +**Static build fails (musl not found)** +``` +Error: musl-gcc not found +``` +Solution: Install musl tools. On Debian/Ubuntu: `apt install musl-tools`. On Arch: `pacman -S musl`. The install script falls back to dynamic linking if musl is unavailable. + +**Docker test shows 0 findings** +``` +Results: 0 passed, 30 failed +``` +Solution: The planted credential fixtures may not have been copied. Check that `tests/docker/planted/` contains the test files and rebuild with `just docker-build`. + +**Binary too large** +The debug build produces a ~2MB binary. For a smaller binary: `just release-small` compiles with optimizations, strips symbols, and compresses with UPX, producing a binary under 200KB. + +## Related Projects + +If you found this interesting, check out: +- **[secrets-scanner](https://github.com/CarterPerez-dev/Cybersecurity-Projects/tree/main/PROJECTS/intermediate/secrets-scanner)** - Scans codebases and git history for leaked secrets using entropy analysis and pattern matching. 
Complements this project by covering the repository side rather than the file system side +- **[docker-security-audit](https://github.com/CarterPerez-dev/Cybersecurity-Projects/tree/main/PROJECTS/intermediate/docker-security-audit)** - CIS Docker Benchmark scanner. Focuses on container security, where misconfigured containers can expose the same credential files this tool detects +- **[api-rate-limiter](https://github.com/CarterPerez-dev/Cybersecurity-Projects/tree/main/PROJECTS/advanced/api-rate-limiter)** - Enterprise rate limiting for FastAPI. Shows how stolen API credentials (the kind this tool finds) get used to abuse API endpoints diff --git a/PROJECTS/intermediate/credential-enumeration/learn/01-CONCEPTS.md b/PROJECTS/intermediate/credential-enumeration/learn/01-CONCEPTS.md new file mode 100644 index 00000000..29201de0 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/learn/01-CONCEPTS.md @@ -0,0 +1,335 @@ +# 01-CONCEPTS.md + +# Security Concepts + +This document covers the security fundamentals behind credential enumeration: what credentials exist on a typical Linux system, where they live, why they're exposed, and what real attackers do with them. + +## Credential Exposure and Post-Access Enumeration + +### What It Is + +Credential exposure is when authentication material (passwords, tokens, private keys, API secrets) is stored in a way that allows unauthorized access. Post-access enumeration is the phase of an attack where, having gained some level of access to a system, the attacker systematically searches for additional credentials to expand their reach. + +This is one of the first things attackers do after landing on a machine. Not because it's clever, but because it works. Developer workstations accumulate credentials like lint. AWS keys in `~/.aws/credentials`. SSH private keys without passphrases in `~/.ssh/`. GitHub tokens in `~/.gitconfig`. Database passwords in `~/.pgpass`. Vault tokens in `~/.vault-token`. 
Most of these files sit there for months or years, rarely audited, often with permissions looser than they need to be. + +### Why It Matters + +The Uber breach in September 2022 is the textbook example. An 18-year-old attacker purchased stolen credentials from the dark web, bypassed MFA through push notification fatigue, and then found hardcoded credentials in PowerShell scripts on internal network shares. Those credentials gave access to the AWS console, Google Workspace admin, Duo admin panel, and Uber's HackerOne bug bounty dashboard. The initial compromise was social engineering; the escalation was credential harvesting. + +The LastPass breach (2022-2023) is even more direct. Attackers compromised a senior DevOps engineer's home machine, found SSH keys and decryption keys stored locally, and used them to access production cloud storage containing encrypted customer vaults. The engineer had legitimate access; the credentials on their home machine provided the path. + +In the Codecov supply chain attack (April 2021), the attackers modified a bash uploader script to exfiltrate environment variables from CI/CD pipelines. The leaked variables included tokens, API keys, and credentials that CI systems had access to. Thousands of customers were affected because their build environments had credentials available as environment variables. 
+ +### MITRE ATT&CK Mapping + +This project directly implements detection for these techniques: + +| Technique ID | Name | What We Detect | +|-------------|------|----------------| +| T1552 | Unsecured Credentials | Parent technique for all credential exposure | +| T1552.001 | Credentials In Files | AWS credentials, .pgpass, .my.cnf, .netrc, .git-credentials, .env files | +| T1552.003 | Bash History | Secrets in shell history (export TOKEN=, curl -u), command patterns | +| T1552.004 | Private Keys | SSH keys (encrypted/unencrypted), GCP service account keys | +| T1555 | Credentials from Password Stores | GNOME Keyring, KDE Wallet, KeePass databases, pass store | +| T1555.001 | Keychain | Desktop keyring databases | +| T1555.003 | Credentials from Web Browsers | Firefox logins.json/key4.db, Chromium Login Data | +| T1539 | Steal Web Session Cookie | Firefox cookies.sqlite, Chromium Cookies database | + +## Linux File Permissions + +### What They Are + +Every file on a Linux system has three sets of permissions: owner, group, and others (world). Each set can have read (r=4), write (w=2), and execute (x=1) bits. When you see `0600` on an SSH key, that means the owner can read and write it, but nobody else can see it. When you see `0644`, the owner can read/write but the group and everyone else can read it. + +### Why This Is Critical for Credentials + +File permissions are the primary defense for credential files on disk. An SSH private key with `0644` permissions means any user on the system can read it. On a shared server, any compromised service running as any user can steal that key. On a single-user workstation, malware running as a different user (or a container escape) gets immediate access. + +OpenSSH itself refuses to use a private key with overly permissive permissions. It will print `WARNING: UNPROTECTED PRIVATE KEY FILE!` and refuse to authenticate. But other credential files have no such guard. 
Your AWS credentials file at `~/.aws/credentials` doesn't care about its own permissions. Neither does `~/.pgpass`, `~/.git-credentials`, or `~/.vault-token`. They'll be read by their respective tools regardless of how exposed they are. + +### How the Permission Model Works + +``` +Permission bits: Owner Group Others + rwx rwx rwx +0600 = rw- --- --- (owner read/write only) +0644 = rw- r-- r-- (everyone can read) +0700 = rwx --- --- (owner full access, directory) +0755 = rwx r-x r-x (everyone can read/enter directory) +``` + +The permission check hierarchy this tool uses: + +| Condition | Severity | Reasoning | +|-----------|----------|-----------| +| World-readable (others has read bit, `0o004`) | CRITICAL | Any user or process on the system can read the file | +| Group-readable (group has read bit, `0o040`) | MEDIUM-HIGH | Users sharing the group can read it; common in shared hosting | +| Looser than expected (e.g., 0644 instead of 0600) | LOW | More permissive than best practice but not immediately exploitable | +| Owner-only (0600 file, 0700 directory) | INFO | Correct permissions, noted for completeness | + +### Real World: The Capital One Breach Connection + +The 2019 Capital One breach involved a misconfigured WAF that allowed SSRF, which was used to query the EC2 instance metadata service and retrieve IAM role credentials. While that's a cloud-specific attack path, the underlying principle is the same: credentials that are accessible to processes that shouldn't have them. On a Linux workstation, overly permissive file permissions create the same class of exposure at the filesystem level. + +## Browser Credential Storage + +### How Browsers Store Credentials + +Firefox and Chromium-based browsers both store credentials locally in the user's home directory. + +**Firefox** uses a profile-based system rooted at `~/.mozilla/firefox/`. 
Each profile directory contains: +- `logins.json` - Stored usernames and passwords (encrypted with a key from key4.db) +- `key4.db` - NSS key database that holds the encryption key for logins.json +- `cookies.sqlite` - Session cookies that can be used for session hijacking + +Firefox profiles are listed in `profiles.ini`. A user might have multiple profiles (personal, work), each with their own credential stores. + +**Chromium-based browsers** (Chrome, Brave, Vivaldi, Chromium) store data under `~/.config/{browser}/Default/` (where `{browser}` is e.g. `google-chrome` or `chromium`, and numbered profiles appear as `Profile 1`, `Profile 2`): +- `Login Data` - SQLite database of stored passwords +- `Cookies` - Session cookies +- `Web Data` - Autofill data, saved payment methods + +On Linux, Chromium encrypts stored passwords using the system keyring (GNOME Keyring or KWallet). Without the keyring unlocked, the encrypted passwords aren't directly usable, but the files still reveal which sites have stored credentials and session cookies may be usable without decryption. + +### Why This Matters + +The CircleCI breach in January 2023 involved a compromised engineer's laptop where session tokens were stolen from browser storage. Those tokens provided access to CircleCI's internal systems, which in turn held customer secrets (environment variables, API keys, tokens). The attacker didn't need to crack passwords. Session cookies from browser storage were enough. + +Browser credential databases being world-readable (0644) is a CRITICAL finding because it means any process on the system can read the encrypted credentials and session cookies. Even with encryption, cookies are often immediately usable for session hijacking. + +## SSH Key Security + +### How SSH Keys Work + +SSH key pairs consist of a private key (stored locally) and a public key (placed on remote servers in `~/.ssh/authorized_keys`). The private key proves your identity. 
If someone has your private key, they can authenticate as you to any server that trusts the corresponding public key. + +Private keys come in several formats: +- OpenSSH format (`-----BEGIN OPENSSH PRIVATE KEY-----`) - modern default +- RSA PEM (`-----BEGIN RSA PRIVATE KEY-----`) - legacy format +- ECDSA PEM (`-----BEGIN EC PRIVATE KEY-----`) - elliptic curve +- DSA PEM (`-----BEGIN DSA PRIVATE KEY-----`) - deprecated but still found +- PKCS#8 (`-----BEGIN PRIVATE KEY-----`) - generic wrapper + +### Passphrase Protection + +Private keys can optionally be encrypted with a passphrase. An encrypted key contains markers like `ENCRYPTED`, `Proc-Type: 4,ENCRYPTED`, `bcrypt`, or `aes256-ctr` in its header. Without the passphrase, the key file is useless to an attacker. But an unencrypted key is immediately usable. + +The severity breakdown: + +| Key State | Permissions | Severity | Why | +|-----------|------------|----------|-----| +| Unencrypted | World-readable | CRITICAL | Anyone can steal and immediately use the key | +| Encrypted | World-readable | CRITICAL | Passphrase can be brute-forced offline | +| Unencrypted | Owner-only | HIGH | Correct permissions but no defense-in-depth | +| Encrypted | Owner-only | INFO | Both protections in place | + +### SSH Config Weaknesses + +The SSH config file (`~/.ssh/config`) can also reveal security issues: +- `PasswordAuthentication yes` - allows password-based auth, weaker than key-based +- `StrictHostKeyChecking no` - disables host key verification, enabling machine-in-the-middle attacks +- Host entries reveal which servers the user connects to, giving attackers a target list + +### Real World: The Codecov Breach Chain + +When Codecov's bash uploader was compromised in 2021, one of the credential types exfiltrated from CI/CD environments was SSH keys. Attackers used stolen SSH keys from Twitch's CI pipeline to access internal Git repositories, which contributed to the massive Twitch source code leak in October 2021. 
SSH keys found in one environment became the entry point into another. + +## Cloud Provider Credentials + +### AWS + +AWS credentials live in `~/.aws/credentials` in INI format with profiles. Each profile can contain: +- `aws_access_key_id` - starts with `AKIA` for static keys or `ASIA` for temporary session keys +- `aws_secret_access_key` - the secret component +- `aws_session_token` - present for temporary credentials + +Static keys (`AKIA`) are long-lived and the highest risk. They work until explicitly rotated or deleted. Session keys (`ASIA`) are temporary but still dangerous during their validity window. The companion file `~/.aws/config` contains profiles, region settings, and optionally SSO or MFA configurations. + +A world-readable AWS credentials file is a CRITICAL finding. Any static key found there can be used to make API calls to AWS services with whatever permissions the associated IAM user or role has. + +### GCP + +Google Cloud credentials are stored in `~/.config/gcloud/`. The most sensitive file is `application_default_credentials.json`, which can contain either user credentials (from `gcloud auth application-default login`) or a service account key (a JSON file with a private key). Service account keys are HIGH severity because they don't expire and often have broad permissions. User credentials are MEDIUM because they're tied to an interactive session and may have short-lived tokens. + +### Azure + +Azure CLI stores token caches at `~/.azure/accessTokens.json` and `~/.azure/msal_token_cache.json`. These contain OAuth tokens that can be used to make Azure API calls. On a multi-user system, a readable token cache means other users can impersonate the authenticated Azure user. + +### Kubernetes + +The Kubernetes config at `~/.kube/config` contains cluster contexts, user definitions, and authentication data. 
This can include: +- Bearer tokens (direct API access) +- Client certificate data (embedded certs) +- Auth provider configurations + +A Kubernetes config with bearer tokens is HIGH severity because those tokens often provide cluster-admin or broad namespace access. The 2022 TeamTNT campaign specifically targeted Kubernetes credentials on compromised hosts to spread across container clusters. + +## Shell History as an Attack Surface + +### What's in Shell History + +Shell history files (`~/.bash_history`, `~/.zsh_history`, `~/.fish_history`) record every command typed in a terminal session. Developers routinely type secrets directly into their shells: + +**Secret exports:** +``` +export API_KEY=sk-proj-abc123... +export DATABASE_URL=postgresql://admin:password@prod.db:5432/app +export AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY +``` + +**Credential-bearing commands:** +``` +curl -H "Authorization: Bearer ghp_xxxx" https://api.github.com/repos +curl -u admin:s3cret https://internal-api.corp.net/deploy +mysql -u root -pMyPassword production_db +sshpass -p 'server_pass' ssh deploy@prod.server.com +``` + +These entries persist in history files indefinitely unless explicitly cleared. On a compromised workstation, shell history is one of the first places an attacker checks. + +### Environment Files + +Beyond shell history, `.env` files scattered across project directories contain application secrets. These files follow the `KEY=value` pattern and are used by frameworks and tools to load configuration. A recursive scan of the home directory commonly finds `.env`, `.env.local`, `.env.production`, and `.env.staging` files containing database passwords, API keys, and service tokens. + +### Real World: The Dropbox Breach + +In November 2022, Dropbox disclosed that an attacker accessed 130 internal GitHub repositories after a phishing attack. 
The investigation found that the attacker obtained credentials that were stored in environment configuration used by CI/CD systems. The pattern is consistent: credentials in configuration files, accessible after initial access. + +## Keyrings and Password Managers + +### Desktop Keyrings + +Linux desktop environments provide system-level credential storage: + +**GNOME Keyring** (`~/.local/share/keyrings/`) stores passwords, SSH key passphrases, and application secrets in `.keyring` files. The default keyring is unlocked when the user logs in and stays unlocked for the session. If the keyring files are readable by other users, the encrypted contents can be exfiltrated for offline attack. + +**KDE Wallet** (`~/.local/share/kwalletd/`) serves the same purpose for KDE desktops. + +### Password Manager Databases + +**KeePass** databases (`.kdbx` files) can exist anywhere in the home directory. They're encrypted with a master password (and optionally a key file), but finding a KeePass database tells an attacker that a password vault exists and is worth targeting. The database file plus a keylogger for the master password gives access to every stored credential. + +**pass** (password-store) at `~/.password-store/` uses GPG-encrypted files organized as directories. Each `.gpg` file is one credential. The number of entries reveals the scope of stored credentials. + +**Bitwarden** stores local vault data at `~/.config/Bitwarden/` and `~/.config/Bitwarden CLI/`. Like KeePass, the vault is encrypted, but its presence and accessibility are worth documenting. + +## Git Credential Storage + +### Plaintext Git Credentials + +The file `~/.git-credentials` stores credentials in plaintext URL format: `https://user:token@github.com`. This file is created when using the `store` credential helper (`git config credential.helper store`). Each line is a full URL with embedded authentication. This is HIGH severity by default and CRITICAL if world-readable. 
+ +### Credential Helpers + +Git config files (`~/.gitconfig`, `~/.config/git/config`) specify credential helpers. The `store` helper saves to `.git-credentials` in plaintext. Other helpers like `cache` (temporary in-memory), `osxkeychain`, or `gnome-keyring` are more secure but their configuration still reveals how the user manages Git authentication. + +### Token Patterns + +GitHub personal access tokens follow known prefixes: `ghp_` (classic PAT), `gho_` (OAuth), `ghu_` (user-to-server), `ghs_` (server-to-server), `ghr_` (refresh). GitLab tokens start with `glpat-`. Finding these patterns in Git config files means tokens have been hardcoded, likely inadvertently. + +### Real World: The Mercedes-Benz Leak + +In January 2024, security researchers found a GitHub token in a public Mercedes-Benz repository that provided unrestricted access to the company's internal GitHub Enterprise Server. The token, likely committed by mistake, exposed source code, cloud access keys, blueprints, and internal design documents. This is the exact class of exposure Git credential scanning detects. + +## Application Tokens and Database Credentials + +### Database Credential Files + +Several database clients support credential files in the home directory: + +- `~/.pgpass` - PostgreSQL password file. Format: `hostname:port:database:username:password`, one entry per line. PostgreSQL enforces 0600 permissions on this file, but doesn't prevent the file from existing with worse permissions +- `~/.my.cnf` - MySQL client configuration. Can contain `[client]` sections with `password=` entries +- `~/.rediscli_auth` - Redis CLI authentication credentials +- `~/.mongorc.js` - MongoDB shell startup file, may contain authentication commands + +### Development Tokens + +- `~/.npmrc` - npm registry authentication. 
Contains `_authToken=` for package publishing +- `~/.pypirc` - PyPI credentials for publishing Python packages +- `~/.config/gh/hosts.yml` - GitHub CLI OAuth tokens + +### Infrastructure Tokens + +- `~/.terraform.d/credentials.tfrc.json` - Terraform Cloud API tokens +- `~/.vault-token` - HashiCorp Vault authentication token +- `~/.config/helm/repositories.yaml` - Helm chart repository credentials +- `~/.config/rclone/rclone.conf` - Rclone cloud storage credentials (may contain OAuth tokens or API keys) +- `~/.docker/config.json` - Docker registry authentication tokens + +### Application Data + +Desktop applications store session data locally: +- Slack (`~/.config/Slack/`) - Workspace session tokens +- Discord (`~/.config/discord/`) - Authentication tokens +- VS Code (`~/.config/Code/`) - Extension credentials, potentially including SSH keys and API tokens in settings + +## Common Pitfalls + +**Assuming encryption means safety.** An encrypted SSH key with 0644 permissions is still a CRITICAL finding. The encrypted key can be exfiltrated and the passphrase brute-forced offline with tools like John the Ripper. Encryption is defense-in-depth, not a substitute for proper permissions. + +**Ignoring "just config" files.** AWS config (`~/.aws/config`) doesn't contain secrets directly, but it reveals account structure, regions, and whether MFA is required. Kubernetes config without tokens still shows cluster endpoints and namespaces. This reconnaissance data helps attackers plan further exploitation. + +**Forgetting about temporary files.** Shell history accumulates over time. A secret exported six months ago is still in `.bash_history` unless manually cleaned. Environment files in project directories may have been created during development and never removed after deployment. + +**Trusting single-user systems.** "I'm the only user on this machine" doesn't mean credentials are safe. 
Malware, container escapes, browser exploits, and compromised applications all run as processes with some level of file system access. World-readable credentials are accessible to all of them. + +## How These Concepts Relate + +``` + ┌──────────────────────┐ + │ Initial Access │ + │ (phishing, exploit, │ + │ stolen creds, etc) │ + └──────────┬───────────┘ + │ + ▼ + ┌──────────────────────┐ + │ Post-Access │ + │ Enumeration │ + │ (this tool) │ + └──────────┬───────────┘ + │ + ┌──────────────┬─────────┼─────────┬────────────────┐ + ▼ ▼ ▼ ▼ ▼ + ┌──────────────┐ ┌─────────┐ ┌───────┐ ┌─────────┐ ┌───────────┐ + │ SSH Keys │ │ Cloud │ │ Git │ │ Browser │ │ App │ + │ T1552.004 │ │ T1552.001│ │Tokens │ │ T1555.003│ │ Tokens │ + └──────┬───────┘ └────┬────┘ └───┬───┘ └────┬────┘ └─────┬─────┘ + │ │ │ │ │ + ▼ ▼ ▼ ▼ ▼ + ┌──────────────┐ ┌─────────┐ ┌───────┐ ┌─────────┐ ┌───────────┐ + │ Lateral │ │ Cloud │ │ Source │ │ Session │ │ Database │ + │ Movement │ │ Pivot │ │ Code │ │ Hijack │ │ Access │ + │ │ │ │ │ Access │ │ │ │ │ + └──────────────┘ └─────────┘ └───────┘ └─────────┘ └───────────┘ +``` + +Each credential type enables a different escalation path. SSH keys enable lateral movement to other servers. Cloud credentials pivot into cloud infrastructure. Git tokens expose source code repositories. Browser cookies enable session hijacking. Application tokens give direct access to databases and services. + +The common thread is file permissions. Every finding in this tool comes down to: is the credential file accessible to more entities than it should be, and is the credential itself protected (encrypted, passphrase-protected) or in plaintext? + +## Testing Your Understanding + +Before moving to the architecture, make sure you can answer: + +1. Why is an unencrypted SSH key with 0600 permissions rated HIGH rather than CRITICAL? What would push it to CRITICAL? +2. An attacker finds `~/.aws/credentials` with two profiles: one using `AKIA` keys and one using `ASIA` keys. 
Which is more concerning and why? +3. Why does this tool scan for `.env` files recursively but limits depth to 5 directories? What would happen without a depth limit? +4. A Firefox logins.json file is encrypted. Why is it still a finding? +5. How does shell history scanning differ from environment file scanning in terms of what's detected and why the severity differs? + +## Further Reading + +**Essential:** +- [MITRE ATT&CK: Unsecured Credentials](https://attack.mitre.org/techniques/T1552/) - The framework mapping for everything this tool detects +- [MITRE ATT&CK: Credentials from Password Stores](https://attack.mitre.org/techniques/T1555/) - Browser and keyring credential theft +- [CIS Benchmarks for Linux](https://www.cisecurity.org/benchmark/distribution_independent_linux) - File permission hardening recommendations + +**Deep Dives:** +- [Uber Security Incident Report (2022)](https://www.uber.com/newsroom/security-update/) - Post-access credential harvesting in practice +- [CircleCI Security Incident (2023)](https://circleci.com/blog/jan-4-2023-incident-report/) - Browser token theft leading to platform compromise +- [LastPass Security Incident (2022-2023)](https://blog.lastpass.com/2023/03/security-incident-update-recommended-actions/) - Home machine credential theft leading to production breach + +**Historical Context:** +- [Codecov Supply Chain Attack (2021)](https://about.codecov.io/security-update/) - Environment variable exfiltration at scale +- [Twitch Source Code Leak (2021)](https://blog.twitch.tv/en/2021/10/06/updates-on-the-twitch-security-incident/) - Stolen credentials enabling source code access diff --git a/PROJECTS/intermediate/credential-enumeration/learn/02-ARCHITECTURE.md b/PROJECTS/intermediate/credential-enumeration/learn/02-ARCHITECTURE.md new file mode 100644 index 00000000..2ef0b333 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/learn/02-ARCHITECTURE.md @@ -0,0 +1,498 @@ +# 02-ARCHITECTURE.md + +# Architecture + +This 
document covers the system design of credenum: how components connect, why they're structured this way, and the trade-offs behind the design decisions. + +## High-Level Architecture + +``` +┌──────────────────────────────────────────────────────────────────────────┐ +│ CLI (harvester.nim) │ +│ Parse arguments → Validate config → Route to dry-run or scan │ +└──────────────────────────────────┬───────────────────────────────────────┘ + │ HarvestConfig + ▼ +┌──────────────────────────────────────────────────────────────────────────┐ +│ Runner (runner.nim) │ +│ Iterate enabled modules → Dispatch to collectors → Aggregate results │ +└───────┬──────────┬──────────┬──────────┬──────────┬──────────┬──────────┘ + │ │ │ │ │ │ + ▼ ▼ ▼ ▼ ▼ ▼ + ┌──────────┐┌─────────┐┌───────┐┌─────────┐┌─────────┐┌──────────┐ + │ browser ││ ssh ││ cloud ││ history ││ keyring ││ git │ + └──────────┘└─────────┘└───────┘└─────────┘└─────────┘└──────────┘ + │ │ │ │ │ │ + └──────────┴──────────┴──────────┴──────────┴──────────┘ + │ + ┌──────┴──────┐ + │ apptoken │ + └─────────────┘ + │ + All collectors share base.nim utilities + All return CollectorResult + │ + ▼ +┌──────────────────────────────────────────────────────────────────────────┐ +│ Report Assembly │ +│ Combine CollectorResults → Calculate severity summary → Add metadata │ +└──────────────────────────────┬───────────────────────────────────────────┘ + │ Report + ┌──────────┴──────────┐ + ▼ ▼ + ┌─────────────────┐ ┌─────────────────┐ + │ terminal.nim │ │ json.nim │ + │ Box-drawn color │ │ Structured JSON │ + │ output to stdout│ │ to stdout/file │ + └─────────────────┘ └─────────────────┘ + │ │ + ▼ ▼ + Exit code 0/1 Exit code 0/1 + (0=clean, 1=high/critical findings) +``` + +## Component Breakdown + +### CLI Layer (harvester.nim) + +**Purpose:** Parse command-line arguments, build configuration, and orchestrate the scan lifecycle. 
+ +**Responsibilities:** +- Parse CLI flags using Nim's `parseopt` (no external dependencies) +- Construct a `HarvestConfig` with validated settings +- Short-circuit for `--help`, `--version`, and `--dry-run` +- Call the runner, render output, and determine exit code + +**Interfaces:** +- Input: Raw command-line arguments +- Output: Exit code (0 or 1), rendered output to stdout + +The CLI layer is deliberately thin. It does no scanning and no output formatting. It builds config, calls the runner, picks the formatter, and exits. + +### Runner (runner.nim) + +**Purpose:** Map enabled module categories to their collector implementations and aggregate results. + +**Responsibilities:** +- Maintain the `Category → CollectorProc` routing table via `getCollector` +- Iterate through `config.enabledModules` and invoke each collector +- Time the full scan duration +- Sum severity counts into the report summary + +**Interfaces:** +- Input: `HarvestConfig` +- Output: `Report` (metadata + results + summary) + +The runner doesn't know how collectors work internally. It gets a function pointer from `getCollector`, calls it, and collects the result. This means adding a new collector category requires only adding a case to the routing table and importing the module. + +### Collectors (collectors/*.nim) + +**Purpose:** Each collector scans for one category of credential exposure and returns findings. + +**Responsibilities:** +- Scan known file paths for the category +- Analyze file contents and permissions +- Construct `Finding` objects with severity classification +- Return a `CollectorResult` with all findings and any errors + +**Shared interface:** Every collector exports a `collect` proc with the signature: + +```nim +proc collect*(config: HarvestConfig): CollectorResult {.nimcall, raises: [].} +``` + +This uniform signature is what makes the routing table work. The runner doesn't need to know which collector it's calling. 
+ +### Base Utilities (collectors/base.nim) + +**Purpose:** Provide shared file system operations and finding construction helpers. + +**Responsibilities:** +- Safe file/directory existence checks (catch filesystem exceptions) +- POSIX stat-based permission inspection +- File metadata extraction (size, modification time, permissions string) +- Finding and credential factory functions +- Permission-based severity calculation +- Value redaction for credential previews + +The base module is the only place that directly calls POSIX syscalls. All collectors go through `safeFileExists`, `readFileContent`, `getNumericPerms`, and the `makeFinding`/`makeFindingWithCred` constructors rather than using `os` and `posix` directly. + +### Output Renderers (output/*.nim) + +**Purpose:** Transform a `Report` into human-readable or machine-readable output. + +**terminal.nim:** +- Renders the ASCII banner, module headers with box drawing, severity badges with ANSI colors, finding details, and a summary footer +- Handles visual-length calculation for strings containing ANSI escape codes and multi-byte UTF-8 characters +- Respects `--quiet` (suppress banner) and `--verbose` (show empty modules) + +**json.nim:** +- Converts the entire `Report` to a nested JSON structure +- Writes to stdout and optionally to a file via `--output` +- Uses Nim's stdlib `json` module for serialization + +Both renderers are read-only consumers of the `Report` type. They don't modify data or trigger side effects beyond writing to stdout/file. + +## Data Flow + +### Primary Flow: Full Scan + +1. User runs `credenum --modules ssh,cloud --format terminal` +2. `parseCli()` parses arguments into `HarvestConfig` with `enabledModules = @[catSsh, catCloud]` +3. `main()` calls `runCollectors(config)` +4. 
Runner iterates `[catSsh, catCloud]`: + - Looks up `ssh.collect` via `getCollector(catSsh)`, calls it + - `ssh.collect` calls `scanKeys`, `scanConfig`, `scanAuthorizedKeys`, `scanKnownHosts` + - Each sub-scanner uses `base.nim` to check files, read contents, analyze permissions + - Returns `CollectorResult` with findings and timing + - Same for `cloud.collect` → `scanAws`, `scanGcp`, `scanAzure`, `scanKubernetes` +5. Runner sums severity counts into `Report.summary` +6. `main()` adds UTC timestamp to metadata +7. `renderTerminal(report, quiet, verbose)` writes formatted output +8. If any CRITICAL or HIGH findings exist, exit with code 1 + +### Secondary Flow: Dry Run + +1. User runs `credenum --dry-run` +2. `parseCli()` sets `config.dryRun = true` +3. `main()` calls `renderDryRun(config)` which prints the module list and target directory +4. Exit with code 0 (no scanning occurs) + +### Finding Construction Flow + +When a collector discovers a credential file: + +``` +File exists? (safeFileExists) + │ + ▼ yes +Read content (readFileContent) + │ + ▼ +Analyze content (is it a private key? does it contain tokens?) 
+ │ + ▼ +Check permissions (getNumericPerms → isWorldReadable, isGroupReadable) + │ + ▼ +Determine severity (content analysis + permission analysis) + │ + ▼ +Build Finding: + ├── makeFinding(path, desc, category, severity) [no credential detail] + └── makeFindingWithCred(path, desc, category, sev, cred) [with credential detail] + │ + ▼ + Finding includes: + path, category, severity, description, + permissions (from getPermsString), + modified (from getModifiedTime), + size (from getFileSizeBytes), + credential (optional: source, type, preview, metadata) +``` + +## Type System Design + +### Core Types (types.nim) + +``` + ┌───────────────┐ + │ Report │ + ├───────────────┤ + │ metadata │──── ReportMetadata + │ results │──── seq[CollectorResult] + │ summary │──── array[Severity, int] + └───────────────┘ + │ + ┌──────────┴──────────┐ + ▼ ▼ + ┌─────────────────┐ ┌─────────────────┐ + │ ReportMetadata │ │ CollectorResult │ + ├─────────────────┤ ├─────────────────┤ + │ timestamp │ │ name │ + │ target │ │ category │ + │ version │ │ findings │──── seq[Finding] + │ durationMs │ │ durationMs │ + │ modules │ │ errors │ + └─────────────────┘ └─────────────────┘ + │ + ▼ + ┌─────────────────┐ + │ Finding │ + ├─────────────────┤ + │ path │ + │ category │──── Category enum + │ severity │──── Severity enum + │ description │ + │ credential │──── Option[Credential] + │ permissions │ + │ modified │ + │ size │ + └─────────────────┘ + │ + ┌─────────┴────────┐ + ▼ ▼ + ┌──────────────┐ ┌──────────────┐ + │ Severity │ │ Credential │ + ├──────────────┤ ├──────────────┤ + │ svInfo │ │ source │ + │ svLow │ │ credType │ + │ svMedium │ │ preview │ + │ svHigh │ │ metadata │ + │ svCritical │ └──────────────┘ + └──────────────┘ +``` + +### Why This Structure + +**Severity as an enum with string values:** Each severity level maps directly to a display label (`"info"`, `"low"`, `"medium"`, `"high"`, `"critical"`). 
Using an enum rather than strings means severity comparisons are integer operations, severity can be used as an array index (for colors, labels, and summary counts), and invalid severity values are caught at compile time. + +**Finding with Optional Credential:** Not every finding has credential details. An SSH directory with wrong permissions is a finding but doesn't have a credential to display. A `.pgpass` file has specific entries worth showing. The `Option[Credential]` type makes this explicit rather than using an empty Credential struct. + +**Credential with metadata Table:** Different credential types have different attributes. An AWS credential has profile count, static/session key counts. An SSH key has encryption status. A Kubernetes config has context and user counts. A `Table[string, string]` metadata field handles this variation without needing a different Credential type per category. + +**CollectorResult with errors seq:** Filesystem operations can fail (permissions denied, broken symlinks, missing directories). Rather than aborting, collectors catch errors and add them to the errors list. The output renderer shows these errors alongside findings so the user knows what couldn't be scanned. + +**Report.summary as array[Severity, int]:** Using the Severity enum as an array index gives O(1) lookup for severity counts and makes iteration natural: `for sev in Severity: report.summary[sev]`. + +### Configuration Types + +```nim +HarvestConfig = object + targetDir: string # Which home directory to scan + enabledModules: seq[Category] # Which collectors to run + excludePatterns: seq[string] # Paths/names to skip + outputFormat: OutputFormat # terminal, json, or both + outputPath: string # File path for JSON output + dryRun: bool # Preview mode + quiet: bool # Suppress banner + verbose: bool # Show empty modules +``` + +The config object is passed to every collector by value. Collectors never modify it. 
+ +```nim +CollectorProc = proc(config: HarvestConfig): CollectorResult {.nimcall, raises: [].} +``` + +The `CollectorProc` type alias defines the contract every collector must satisfy. The `raises: []` pragma means the proc cannot raise exceptions. This is enforced at compile time by `{.push raises: [].}` at the top of every file. + +## Design Patterns + +### Strategy Pattern (Collector Dispatch) + +The runner uses the strategy pattern for collector dispatch. Each Category maps to a collector function with an identical signature: + +``` +getCollector(catBrowser) → browser.collect +getCollector(catSsh) → ssh.collect +getCollector(catCloud) → cloud.collect +... +``` + +This is a case statement rather than a table because Nim's exhaustive case checking ensures every Category has a handler. If you add a new Category to the enum, the compiler forces you to handle it in `getCollector`. + +### Factory Pattern (Finding Construction) + +The `makeFinding` and `makeFindingWithCred` functions in base.nim are factories that handle the boilerplate of constructing a Finding: looking up permissions, modification time, and file size. Collectors call the factory with just the path, description, category, and severity. This prevents inconsistencies where one collector forgets to set the modification time or uses a different permissions format. + +### Layered Scanning (Within Collectors) + +Each collector internally uses a layered scanning approach where sub-scanners handle specific aspects: + +``` +ssh.collect + ├── scanKeys # Private key files + ├── scanConfig # SSH client configuration + ├── scanAuthorizedKeys # Authorized public keys + └── scanKnownHosts # Known host entries +``` + +Sub-scanners modify the `CollectorResult` in-place via `var` parameter rather than returning separate results. This avoids allocation overhead from merging multiple sequences. + +## Severity Classification Model + +The severity model has two inputs: content analysis and permission analysis. 
The higher severity wins. + +``` +Content-based severity: +  ┌─────────────────────────────────────────────────┐ +  │ Unencrypted SSH key        → HIGH              │ +  │ Encrypted SSH key          → INFO              │ +  │ Plaintext Git credentials  → HIGH              │ +  │ AWS static keys (AKIA)     → HIGH              │ +  │ GCP service account key    → HIGH              │ +  │ Database password file     → HIGH              │ +  │ Secret in shell history    → HIGH              │ +  │ Config file (no secrets)   → INFO              │ +  └─────────────────────────────────────────────────┘ + +Permission-based override: +  ┌─────────────────────────────────────────────────┐ +  │ World-readable (0o004 bit) → CRITICAL          │ +  │ Group-readable (0o040 bit) → MEDIUM or HIGH    │ +  │ Looser than expected       → LOW               │ +  │ Owner-only                 → (no override)     │ +  └─────────────────────────────────────────────────┘ +``` + +The final severity is the maximum of content-based and permission-based. An encrypted SSH key (INFO from content) that's world-readable (CRITICAL from permissions) becomes CRITICAL. + +## Configuration Architecture + +### Zero External Configuration + +The tool has no configuration files, no environment variables, and no dotfiles. All configuration comes from CLI flags with sensible defaults: + +| Setting | Default | Override | +|---------|---------|----------| +| Target directory | Current user's home | `--target DIR` | +| Enabled modules | All 7 | `--modules ssh,git,...` | +| Exclude patterns | None | `--exclude .cache,vendor` | +| Output format | Terminal | `--format json\|both` | +| Output file | None (stdout only) | `--output report.json` | + +This is intentional. A security tool that reads config from the filesystem creates a circular dependency: you're scanning for exposed configuration while relying on configuration that could itself be tampered with. + +### Constants Architecture + +All scan targets, patterns, and thresholds are defined as compile-time constants in `config.nim`. 
This means: + +- No runtime configuration parsing +- No allocation for path lists or pattern arrays +- Compiler can inline and optimize all constant lookups +- Adding a new scan target is a one-line change to a constant array + +The constants are organized by category (SSH, AWS, GCP, browser, etc.) with clear groupings. UI constants (colors, box-drawing characters, severity labels) are also in config.nim to keep them centralized. + +## Error Handling Strategy + +### No Exceptions, Ever + +Every file in the project starts with `{.push raises: [].}`. This Nim pragma tells the compiler that no proc in this file is allowed to raise an exception. Any call to a function that might raise (file I/O, string operations, etc.) must be wrapped in try/except. + +This is enforced at compile time. If you add a call to `readFile()` without a try/except, the compiler will reject it with an error showing exactly which function could raise and where. + +### Error Recovery Pattern + +Every file system operation follows the same pattern: + +1. Try the operation +2. On success, process the result +3. On failure, either return a safe default or add to the error list + +``` +safeFileExists() → returns false on exception +readFileContent() → returns "" on exception +getNumericPerms() → returns -1 on exception +walkDir() → caught at collector level, added to errors[] +``` + +Collectors never abort. A failed directory walk adds an error message and continues scanning other paths. The final report shows both findings and errors, so the user knows what was scanned and what was skipped. + +### Exit Codes + +| Code | Meaning | +|------|---------| +| 0 | Scan completed, no HIGH or CRITICAL findings | +| 1 | Scan completed, HIGH or CRITICAL findings detected | + +The exit code is determined after all output is rendered. This allows the tool to be used in scripts and CI pipelines: `credenum --quiet --format json -o report.json && echo "clean" || echo "findings detected"`. 
+ +## Performance Considerations + +### Sequential Collector Execution + +Collectors run sequentially, not in parallel. This is a deliberate choice: + +- Most collectors complete in under 10ms (the target is a single home directory, not a network scan) +- Sequential execution produces deterministic output ordering +- No thread synchronization overhead +- No risk of TOCTOU races on file metadata +- Total scan time is typically under 100ms for a full home directory + +If parallel execution were needed (e.g., for network shares or very large home directories), the uniform `CollectorProc` signature makes parallelization straightforward: spawn each collector in a thread and collect results. + +### Memory Efficiency + +- All constant data (paths, patterns, colors) is compiled into the binary, not allocated at runtime +- File contents are read entirely into strings (not streamed) because credential files are small (typically under 1KB) +- Shell history scanning caps at 50,000 lines to prevent unbounded memory usage on extremely large history files +- Recursive directory walking (for .env files and .kdbx databases) is depth-limited to 5 levels +- Findings are collected into sequences that grow as needed, with no pre-allocation + +### Binary Size + +| Build | Size | Notes | +|-------|------|-------| +| Debug | ~2MB | Full symbols, bounds checking | +| Release | ~500KB | Optimized, LTO, stripped | +| Static release | ~400KB | musl, no glibc dependency | +| Compressed | ~150KB | UPX on top of static release | + +The compressed binary is small enough to transfer over slow connections or embed in other tools. + +## Extensibility + +### Adding a New Collector + +To add a new collector (e.g., for container credentials): + +1. Add a new value to the `Category` enum in `types.nim` +2. Add the module name and description to the arrays in `config.nim` +3. Create `collectors/container.nim` implementing `proc collect*(config: HarvestConfig): CollectorResult` +4. 
Add the import and case branch in `runner.nim`'s `getCollector` + +The compiler will guide you: adding a Category enum value without handling it in the case statement produces a compile error. + +### Adding a New Output Format + +To add a new output format (e.g., SARIF): + +1. Add a value to the `OutputFormat` enum +2. Create `output/sarif.nim` with a `proc renderSarif*(report: Report, outputPath: string)` +3. Add the case branch in `harvester.nim`'s main function + +The output module receives the complete `Report` and has full freedom in how it renders it. + +### Adding New Scan Targets + +To scan for a new credential file within an existing category: + +1. Add the path constant to `config.nim` +2. Add scanning logic in the relevant collector +3. Add a test fixture in `tests/docker/planted/` and a check in `validate.sh` + +No other files need to change. + +## Limitations + +**Linux-only.** The tool targets Linux credential storage paths. macOS stores credentials differently (Keychain, different browser paths). Windows has an entirely different model (Credential Manager, DPAPI). Supporting other platforms would require platform-specific collector implementations. + +**Read-only.** The tool detects but doesn't remediate. It won't fix permissions, encrypt keys, or rotate credentials. This is intentional: a scanning tool should never modify the filesystem it's inspecting. + +**Static paths.** Credential paths are compiled into the binary as constants. Non-standard installations (e.g., Firefox installed via Flatpak, Snap-based browsers, custom HOME directory layouts) may store credentials in different locations that the tool doesn't check. + +**No credential decryption.** The tool identifies encrypted credential stores but doesn't attempt to decrypt them. It checks whether encryption is present and whether file permissions expose the encrypted data, but it doesn't evaluate encryption strength or attempt brute-force. 
+ +## Key Files Reference + +| File | Purpose | +|------|---------| +| `src/harvester.nim` | Entry point, CLI parsing, main loop | +| `src/runner.nim` | Collector dispatch and result aggregation | +| `src/types.nim` | All type definitions | +| `src/config.nim` | All constants (paths, patterns, colors, UI) | +| `src/collectors/base.nim` | Shared utilities for all collectors | +| `src/collectors/*.nim` | One file per credential category | +| `src/output/terminal.nim` | Terminal renderer with box drawing | +| `src/output/json.nim` | JSON serializer | +| `tests/test_all.nim` | Unit tests | +| `tests/docker/` | Docker integration test infrastructure | +| `config.nims` | Nim compiler configuration | +| `Justfile` | Build automation commands | + +## Next Steps + +- Read [03-IMPLEMENTATION.md](./03-IMPLEMENTATION.md) to see how each component is implemented, with code walkthroughs of the CLI parser, collector modules, permission analysis, and output rendering +- Read [04-CHALLENGES.md](./04-CHALLENGES.md) for extension ideas diff --git a/PROJECTS/intermediate/credential-enumeration/learn/03-IMPLEMENTATION.md b/PROJECTS/intermediate/credential-enumeration/learn/03-IMPLEMENTATION.md new file mode 100644 index 00000000..584d9e8b --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/learn/03-IMPLEMENTATION.md @@ -0,0 +1,806 @@ +# 03-IMPLEMENTATION.md + +# Implementation + +This document walks through the actual code: how each component works, why it's structured the way it is, and what patterns to look for when reading or extending the codebase. 
+ +## File Structure + +``` +src/ +├── harvester.nim # Entry point — CLI parsing, main orchestration +├── runner.nim # Collector dispatch — maps categories to collectors +├── types.nim # Type definitions — Finding, Report, Severity, etc +├── config.nim # Constants — every path, pattern, threshold, color +├── collectors/ +│ ├── base.nim # Shared utils — file ops, permissions, factories +│ ├── browser.nim # Firefox + Chromium credential stores +│ ├── ssh.nim # SSH keys, config, authorized_keys +│ ├── git.nim # .git-credentials, tokens, config helpers +│ ├── cloud.nim # AWS, GCP, Azure, Kubernetes +│ ├── history.nim # Shell history, command patterns, .env files +│ ├── keyring.nim # GNOME, KDE, KeePass, pass, Bitwarden +│ └── apptoken.nim # DB creds, dev tokens, infra tokens, Docker +└── output/ + ├── terminal.nim # Color terminal with box drawing + └── json.nim # Structured JSON output +``` + +## The Zero-Exception Foundation + +Every source file opens with: + +```nim +{.push raises: [].} +``` + +This Nim pragma tells the compiler: no procedure in this file may propagate an exception. Any call to a function that might raise (file I/O, string parsing, etc.) must be wrapped in try/except within this file. The compiler enforces this at compile time and will reject code that could propagate an uncaught exception. + +This matters for a security tool because crashing mid-scan means partial results, missed findings, and unreliable output. The `{.push raises: [].}` guarantee means that if the tool compiles, it will not crash from unhandled exceptions during a scan. + +Where an operation genuinely can't avoid exceptions (like calling `readFile`), the pattern is: + +```nim +proc readFileContent*(path: string): string = + try: + result = readFile(path) + except CatchableError: + result = "" +``` + +The exception is caught immediately and converted to a safe default. This pattern repeats throughout `base.nim` for every filesystem operation. 
+ +## Entry Point: harvester.nim + +### CLI Parsing + +The `parseCli` function builds a `HarvestConfig` from command-line arguments using Nim's stdlib `parseopt`: + +```nim +proc parseCli(): HarvestConfig = + result = defaultConfig() + + var parser = initOptParser( + commandLineParams(), + shortNoVal = {'d', 'q', 'v', 'h'}, + longNoVal = @["dry-run", "quiet", "verbose", "help", "version"] + ) +``` + +The `shortNoVal` and `longNoVal` parameters tell the parser which flags don't take values. Without this, `--quiet --format json` would try to parse `--format` as the value of `--quiet`. + +The parser loop uses `case parser.key.toLowerAscii()` for case-insensitive matching, so `--Target`, `--target`, and `--TARGET` all work. Each recognized flag updates the result config. Unrecognized flags are silently discarded (`else: discard`). + +### Module Parsing + +The `parseModules` function converts comma-separated module names to a sequence of `Category` values: + +```nim +proc parseModules*(input: string): seq[Category] = + result = @[] + let parts = input.split(",") + for part in parts: + let name = part.strip().toLowerAscii() + for cat in Category: + if ModuleNames[cat] == name: + result.add(cat) + break +``` + +This iterates through every `Category` enum value and checks if the module name matches. Unknown module names are silently skipped. The `ModuleNames` array in config.nim maps each Category to its string name, so the mapping is defined in one place. 
+ +### Main Orchestration + +The `main` function is the control flow hub: + +```nim +proc main() = + let conf = parseCli() + + if conf.dryRun: + renderDryRun(conf) + quit(0) + + var report = runCollectors(conf) + report.metadata.timestamp = now().utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + + case conf.outputFormat + of fmtTerminal: renderTerminal(report, conf.quiet, conf.verbose) + of fmtJson: renderJson(report, conf.outputPath) + of fmtBoth: + renderTerminal(report, conf.quiet, conf.verbose) + renderJson(report, conf.outputPath) + + var hasHighSeverity = false + for sev in [svCritical, svHigh]: + if report.summary[sev] > 0: + hasHighSeverity = true + break + + if hasHighSeverity: quit(1) else: quit(0) +``` + +The timestamp is set after the scan completes (not before) so it reflects when results were produced. The `{.cast(raises: []).}` pragma is used around the time formatting because `now()` and `format()` could technically raise, but in practice never do with valid format strings. The cast tells the compiler "I know what I'm doing here." + +The exit code check only looks at CRITICAL and HIGH. MEDIUM, LOW, and INFO findings don't trigger a non-zero exit. This keeps the tool useful in CI pipelines where you want to fail on genuine exposures but not on informational notes. + +## Runner: runner.nim + +### Collector Dispatch + +The `getCollector` function is the routing table: + +```nim +proc getCollector(cat: Category): CollectorProc = + case cat + of catBrowser: browser.collect + of catSsh: ssh.collect + of catCloud: cloud.collect + of catHistory: history.collect + of catKeyring: keyring.collect + of catGit: git.collect + of catApptoken: apptoken.collect +``` + +Nim's case statement on an enum is exhaustive: if you add a new `Category` value without adding a case branch, the compiler will reject the code. This compile-time guarantee prevents the "forgot to wire up the new module" class of bug. 
+ +The return type `CollectorProc` is a function pointer type defined in types.nim: + +```nim +CollectorProc = proc(config: HarvestConfig): CollectorResult {.nimcall, raises: [].} +``` + +The `{.nimcall, raises: [].}` calling convention means the function uses Nim's native calling convention and cannot raise exceptions. This contract is enforced at the type level. + +### Result Aggregation + +The `runCollectors` function iterates enabled modules, calls each collector, and builds the report: + +```nim +proc runCollectors*(config: HarvestConfig): Report = + let start = getMonoTime() + var results: seq[CollectorResult] = @[] + var moduleNames: seq[string] = @[] + + for cat in config.enabledModules: + moduleNames.add(ModuleNames[cat]) + let collector = getCollector(cat) + let res = collector(config) + results.add(res) + + let elapsed = getMonoTime() - start + + var summary: array[Severity, int] + for res in results: + for finding in res.findings: + inc summary[finding.severity] +``` + +The use of `getMonoTime()` (monotonic clock) instead of `now()` for timing is important. Monotonic time is immune to clock adjustments and NTP corrections. If the system clock changes during a scan, `getMonoTime()` still gives an accurate duration. + +The summary array uses `Severity` as the index type, so `summary[svCritical]` directly gives the count of critical findings. The `inc` proc increments in-place without allocation. + +## Base Utilities: collectors/base.nim + +### Permission Inspection + +The permission checking functions use POSIX `stat` directly: + +```nim +proc getPermsString*(path: string): string = + var statBuf: Stat + try: + if stat(path.cstring, statBuf) == 0: + let mode = statBuf.st_mode and 0o7777 + result = "0" & toOct(mode.int, 3) + else: + result = "unknown" + except CatchableError: + result = "unknown" +``` + +The `stat` syscall retrieves file metadata from the kernel. The mode field contains the permission bits in the lower 12 bits (mask `0o7777`). 
The `toOct` function converts to octal representation. The string "0" prefix produces the familiar format: "0600", "0644", etc. + +The world-readable and group-readable checks extract specific bits: + +```nim +proc isWorldReadable*(path: string): bool = + var statBuf: Stat + try: + if stat(path.cstring, statBuf) == 0: + result = (statBuf.st_mode.int and WorldReadBit) != 0 + except CatchableError: + discard +``` + +`WorldReadBit` is the constant `0o004` from config.nim. The bitwise AND isolates just the "others read" bit. If it's non-zero, the file is world-readable. + +### Severity from Permissions + +The `permissionSeverity` function encapsulates the permission-to-severity logic: + +```nim +proc permissionSeverity*(path: string, isDir: bool = false): Severity = + let perms = getNumericPerms(path) + if perms < 0: + return svInfo + if (perms and WorldReadBit) != 0: + return svCritical + if (perms and GroupReadBit) != 0: + return svMedium + let expected = if isDir: OwnerOnlyDirPerms else: OwnerOnlyFilePerms + if perms > expected: + return svLow + result = svInfo +``` + +Negative perms means the stat call failed (file doesn't exist or we can't read metadata). World-readable is always CRITICAL. Group-readable is MEDIUM. Anything looser than the expected permissions (0600 for files, 0700 for directories) is LOW. Correct permissions are INFO. + +### Finding Factories + +The two factory functions construct `Finding` objects with consistent metadata: + +```nim +proc makeFinding*( + path: string, + description: string, + category: Category, + severity: Severity +): Finding = + Finding( + path: path, + category: category, + severity: severity, + description: description, + credential: none(Credential), + permissions: getPermsString(path), + modified: getModifiedTime(path), + size: getFileSizeBytes(path) + ) +``` + +Every finding automatically gets the current permissions, modification time, and file size of the target path. Collectors don't need to remember to look these up. 
The `makeFindingWithCred` variant takes an additional `Credential` parameter wrapped in `some()`. + +### Value Redaction + +The `redactValue` function shows the first N characters and replaces the rest: + +```nim +proc redactValue*(value: string, showChars: int = 4): string = + if value.len <= showChars: + result = "*".repeat(value.len) + else: + result = value[0 ..< showChars] & "*".repeat(value.len - showChars) +``` + +For values shorter than or equal to `showChars`, the entire value is masked. For longer values, the first 4 characters are shown. This gives enough context to identify the credential type (e.g., "ghp_" for GitHub tokens, "AKIA" for AWS keys) without exposing the full secret. + +## Collector Implementations + +### SSH Collector: ssh.nim + +The SSH collector has four sub-scanners: `scanKeys`, `scanConfig`, `scanAuthorizedKeys`, and `scanKnownHosts`. + +**Key scanning** is the most complex sub-scanner. It walks the `~/.ssh/` directory, reads each file, and checks if it starts with a PEM header: + +```nim +proc isPrivateKey*(content: string): bool = + for header in SshKeyHeaders: + if content.startsWith(header): + return true +``` + +`SshKeyHeaders` in config.nim contains all five PEM header formats. The check uses `startsWith` rather than `contains` because PEM headers must be at the start of the file. + +Once a private key is found, encryption detection checks for known markers: + +```nim +proc isEncrypted*(content: string): bool = + for marker in SshEncryptedMarkers: + if marker in content: + return true +``` + +The severity calculation combines encryption status and permissions: + +```nim +if not encrypted: + sev = svHigh +else: + sev = svInfo + +if perms >= 0 and (perms and WorldReadBit) != 0: + sev = svCritical +elif perms >= 0 and (perms and GroupReadBit) != 0: + if sev < svHigh: + sev = svHigh +``` + +An unencrypted key starts at HIGH. An encrypted key starts at INFO. 
Then permissions override upward: world-readable forces CRITICAL regardless of encryption. Group-readable escalates to at least HIGH. + +**Config scanning** looks for weak settings: + +```nim +if stripped.toLowerAscii().startsWith("passwordauthentication yes"): + weakSettings.add("PasswordAuthentication enabled") + +if stripped.toLowerAscii().startsWith("stricthostkeychecking no"): + weakSettings.add("StrictHostKeyChecking disabled") +``` + +These are MEDIUM findings because they weaken the SSH connection security but don't directly expose credentials. + +### Browser Collector: browser.nim + +**Firefox scanning** starts by reading `profiles.ini` to find profile directories: + +```nim +let lines = readFileLines(profilesIniPath) +var profiles: seq[string] = @[] +var currentPath = "" + +for line in lines: + let stripped = line.strip() + if stripped.startsWith("[Profile"): + if currentPath.len > 0: + profiles.add(currentPath) + currentPath = "" + + if stripped.toLowerAscii().startsWith("path="): + currentPath = stripped.split("=", maxsplit = 1)[1] +``` + +This is a minimal INI parser that extracts the `Path=` value from each `[Profile*]` section. The `maxsplit = 1` is important because profile paths themselves might contain `=` characters. + +For each profile, the scanner checks three credential files: + +```nim +let credFiles = [ + (FirefoxLoginsFile, "Firefox stored logins database"), + (FirefoxCookiesDb, "Firefox cookies database"), + (FirefoxKeyDb, "Firefox key database") +] + +for (fileName, desc) in credFiles: + let filePath = profileDir / fileName + if safeFileExists(filePath): + let sev = if isWorldReadable(filePath): svCritical + elif isGroupReadable(filePath): svHigh + else: svMedium +``` + +Note that browser credential files are always at least MEDIUM severity even with correct permissions. This is because the files themselves contain sensitive data (encrypted passwords, session cookies) that could be exfiltrated and attacked offline. 
+ +**Chromium scanning** follows a similar pattern but handles multiple browser variants (Chrome, Brave, Vivaldi, Chromium) and numbered profiles (`Default`, `Profile 1`, `Profile 2`, etc.). + +### Cloud Collector: cloud.nim + +**AWS scanning** demonstrates the most detailed credential analysis. It reads the credentials file line by line, counting profiles and classifying key types: + +```nim +if stripped.toLowerAscii().startsWith("aws_access_key_id"): + let parts = stripped.split("=", maxsplit = 1) + if parts.len == 2: + let keyVal = parts[1].strip() + if keyVal.startsWith(AwsStaticKeyPrefix): + inc staticKeys + elif keyVal.startsWith(AwsSessionKeyPrefix): + inc sessionKeys +``` + +Static keys (prefix `AKIA`) are long-lived and escalate severity to HIGH. Session keys (prefix `ASIA`) are temporary and less dangerous. The distinction matters for remediation prioritization. + +**Kubernetes scanning** parses the kubeconfig YAML to count contexts and users, and to detect authentication methods: + +```nim +if "token:" in stripped: + hasTokenAuth = true +if "client-certificate-data:" in stripped: + hasCertAuth = true +``` + +Token authentication is HIGH severity because bearer tokens provide direct API access. Certificate authentication is noted but not escalated because certificates are harder to use in isolation. + +### History Collector: history.nim + +**Secret pattern matching** checks for known environment variable patterns: + +```nim +proc matchesSecretPattern*(line: string): bool = + let upper = line.toUpperAscii() + for pattern in SecretPatterns: + if pattern in upper: + if "export " in line.toLowerAscii() or + line.strip().startsWith(pattern.split("=")[0]): + return true +``` + +The double check (pattern in upper AND either `export` or starts with key name) prevents false positives. `PATH=/usr/bin` contains `=` but doesn't match `KEY=`, `TOKEN=`, or `PASSWORD=`. The function requires both the pattern match and evidence that it's an actual variable assignment. 
+ +**Command pattern matching** uses a custom glob-like matcher: + +```nim +proc matchesCommandPattern*(line: string): bool = + let lower = line.toLowerAscii() + for pattern in HistoryCommandPatterns: + let parts = pattern.split(".*") + if parts.len >= 2: + var allFound = true + var searchFrom = 0 + for part in parts: + let idx = lower.find(part, start = searchFrom) + if idx < 0: + allFound = false + break + searchFrom = idx + part.len + if allFound: + return true +``` + +Patterns like `"curl.*-h.*authoriz"` are split on `".*"` and each segment is searched sequentially. The `searchFrom` index ensures segments match in order. This implements a basic regex-like matching without pulling in a regex library. + +**History line limits** prevent resource exhaustion: + +```nim +const MaxHistoryLines = 50000 +``` + +A developer who's been using the same shell for years might have hundreds of thousands of history entries. Scanning all of them would be slow and memory-intensive. The 50,000 line cap covers the most recent history (where secrets are most likely still valid) while keeping resource usage bounded. + +**Recursive .env scanning** uses depth-limited directory walking: + +```nim +proc walkForEnv( + dir: string, + depth: int, + excludePatterns: seq[string], + result: var CollectorResult +) = + if depth > MaxEnvDepth: + return +``` + +The depth limit of 5 and directory exclusions (`node_modules`, `vendor`, `.git`, `__pycache__`, `.venv`, `.cache`) keep the recursive walk fast. Without these limits, scanning a directory with deeply nested `node_modules` would take minutes. + +### Keyring Collector: keyring.nim + +The keyring collector scans five different credential stores. 
The KeePass scanner is notable for its recursive file search: + +```nim +proc walkForKdbx( + dir: string, + depth: int, + excludePatterns: seq[string], + result: var CollectorResult +) = + if depth > 5: + return +``` + +KeePass database files (`.kdbx`) can be stored anywhere in the home directory, not just in a standard location. The recursive walk finds them wherever they are, while the depth limit and directory exclusions prevent runaway scanning. + +The pass (password-store) scanner counts GPG-encrypted entries: + +```nim +for kind, path in walkDir(passDir, relative = false): + if kind == pcFile and path.endsWith(".gpg"): + inc entryCount +``` + +The count of entries tells the user (or attacker) how many credentials are stored, even though the entries themselves are GPG-encrypted and not directly readable. + +### App Token Collector: apptoken.nim + +The app token collector uses a generic `AppTarget` type to handle application data directories: + +```nim +type + AppTarget = object + path: string + name: string + description: string + isDir: bool +``` + +This lets the collector define scan targets as data rather than code: + +```nim +let appTargets = [ + AppTarget(path: SlackDir, name: "Slack", + description: "Slack desktop session data", isDir: true), + AppTarget(path: DiscordDir, name: "Discord", + description: "Discord desktop session data", isDir: true), + ... +] +``` + +Each target is checked with the same logic: does it exist, and what are its permissions? The `isDir` flag determines whether to use file or directory permission checking. + +The database credential scanning is more detailed. For PostgreSQL's `.pgpass`: + +```nim +let lines = readFileLines(pgpassPath) +var entryCount = 0 +for line in lines: + if line.strip().len > 0 and not line.strip().startsWith("#"): + inc entryCount +``` + +Non-comment, non-empty lines are counted as connection entries. 
The count goes into the credential metadata so the output can show "PostgreSQL password file with 3 entries" rather than just "PostgreSQL password file found." + +## Output Rendering + +### Terminal Renderer: terminal.nim + +The terminal renderer handles the complexity of aligning text in box-drawn tables when strings contain invisible ANSI color codes and multi-byte UTF-8 characters. + +**Visual length calculation** strips ANSI escape sequences and counts only visible characters: + +```nim +proc visualLen(s: string): int = + var i = 0 + while i < s.len: + if s[i] == '\e': + while i < s.len and s[i] != 'm': + inc i + if i < s.len: + inc i + elif (s[i].ord and 0xC0) == 0x80: + inc i + else: + inc result + inc i +``` + +ANSI escapes start with `\e` and end at `m`. UTF-8 continuation bytes have the pattern `10xxxxxx` (the `0xC0` mask checks the top two bits). Only non-escape, non-continuation bytes count as visible characters. This is necessary because a string like `"\e[31mERROR\e[0m"` is 5 visible characters ("ERROR") but 15 bytes long. + +**Box line writing** uses this visual length to pad each line to exactly `BoxWidth` characters: + +```nim +proc writeBoxLine(content: string) = + try: + stdout.write content + let vLen = visualLen(content) + let pad = BoxWidth - vLen - 1 + if pad > 0: + stdout.write " ".repeat(pad) + stdout.writeLine BoxVertical + except CatchableError: + discard +``` + +The `-1` accounts for the closing `BoxVertical` character. This produces perfectly aligned box borders regardless of how many color codes or Unicode characters are in the content. + +**Severity badges** combine color and label: + +```nim +proc sevBadge(sev: Severity): string = + SeverityColors[sev] & ColorBold & " " & SeverityLabels[sev] & " " & ColorReset +``` + +The `SeverityColors` and `SeverityLabels` arrays are indexed by the `Severity` enum, so looking up the color for a severity is a direct array access. 
+ +### JSON Renderer: json.nim + +The JSON renderer converts each type to a `JsonNode` with recursive functions: + +```nim +proc findingToJson(f: Finding): JsonNode = + result = newJObject() + {.cast(raises: []).}: + result["path"] = newJString(f.path) + result["category"] = newJString($f.category) + result["severity"] = newJString($f.severity) + result["description"] = newJString(f.description) + result["permissions"] = newJString(f.permissions) + result["modified"] = newJString(f.modified) + result["size"] = newJInt(f.size) + if f.credential.isSome: + result["credential"] = credentialToJson(f.credential.get()) +``` + +The `{.cast(raises: []).}` block is needed because Nim's JSON operations technically can raise, but in practice won't when building objects from known-good data. The cast is scoped to just the JSON construction block. + +The `$` operator on enum values produces the string representation ("browser", "critical", etc.) defined by the enum value assignments in types.nim. + +## Testing Strategy + +### Unit Tests: test_all.nim + +The unit tests cover pure functions that don't require filesystem state: + +**Redaction tests** verify boundary conditions: + +```nim +suite "redactValue": + test "short value fully redacted": + check redactValue("abc", 4) == "***" + + test "value longer than showChars": + check redactValue("mysecret", 4) == "myse****" +``` + +**Key detection tests** validate all five key formats plus negative cases: + +```nim +suite "isPrivateKey": + test "OpenSSH key": + check isPrivateKey("-----BEGIN OPENSSH PRIVATE KEY-----\ndata") + test "public key rejected": + check isPrivateKey("-----BEGIN PUBLIC KEY-----\ndata") == false +``` + +**Pattern matching tests** cover both true positives and true negatives: + +```nim +suite "matchesSecretPattern": + test "export with KEY=": + check matchesSecretPattern("export API_KEY=some_value") + test "non-secret assignment": + check matchesSecretPattern("export PATH=/usr/bin") == false +``` + +**Permission 
severity tests** use non-existent paths to test the error handling path: + +```nim +suite "permissionSeverity": + test "returns svInfo for unreadable path": + check permissionSeverity("/nonexistent/path/abc123") == svInfo +``` + +**Module parsing tests** verify the CLI-to-Category conversion including edge cases: + +```nim +suite "parseModules": + test "single module": + check parseModules("ssh") == @[catSsh] + test "unknown module ignored": + check parseModules("fake,nonexistent").len == 0 +``` + +Running tests: `just test` compiles and runs the test suite. + +### Integration Tests: Docker + +The Docker-based integration test creates a controlled environment with known credential files and validates that the scanner detects all of them. + +**The Dockerfile** uses a multi-stage build: + +Stage 1 (builder): Compiles credenum from source using the official Nim Alpine image, producing a static binary. + +Stage 2 (runtime): Ubuntu 24.04 with a `testuser` account. The `planted/` directory is copied into the test user's home directory, creating realistic credential files across all 7 categories. Permissions are explicitly set to create specific severity scenarios (e.g., `chmod 0644` on the unprotected SSH key to make it world-readable). + +**The validation script** runs credenum against the test user's home directory and checks for expected findings: + +```bash +OUTPUT=$(credenum --target /home/testuser --format json 2>&1) || true + +check "SSH unprotected private key" "no passphrase" +check "AWS credentials with static keys" "static keys" +check "Firefox stored logins" "Firefox stored logins" +check "History secret pattern" "Secret in shell history" +``` + +Each `check` function searches the JSON output for an expected string. The validation covers all 7 collector categories with 30+ individual checks. + +Running integration tests: `just docker-test` builds the Docker image and runs the validation. 
+ +## Build System + +### Nim Compiler Configuration: config.nims + +The `config.nims` file configures the Nim compiler without command-line flags: + +```nim +switch("mm", "orc") +``` + +ORC (ARC extended with a cycle collector) is Nim's modern memory management. It combines reference counting with a cycle collector, providing deterministic cleanup without a traditional garbage collector pause. + +**Musl static linking:** + +```nim +when defined(musl): + var muslGcc = findExe("musl-gcc") + if muslGcc.len > 0: + switch("gcc.exe", muslGcc) + switch("gcc.linkerexe", muslGcc) + switch("passL", "-static") +``` + +When `-d:musl` is passed, the compiler uses musl-gcc instead of the system gcc, producing a fully static binary with no glibc dependency. This binary runs on any Linux system regardless of glibc version. + +**Cross-compilation:** + +```nim +when defined(crossX86): + switch("passC", "-target x86_64-linux-musl") + switch("passL", "-target x86_64-linux-musl") + switch("os", "linux") + switch("cpu", "amd64") +``` + +The zigcc integration uses Zig's C compiler backend as a cross-compilation toolchain. Passing target triples through `--passC` and `--passL` produces binaries for different architectures without needing a full cross-compilation sysroot. 
+ +### Justfile Build Targets + +The Justfile organizes commands by group: + +**dev** group: `build` (debug), `run` (build + execute), `scan` (build + scan current user), `check` (type check without compilation) + +**prod** group: `release` (optimized), `release-static` (musl), `release-small` (musl + UPX), `build-x86` (cross-compile x86_64), `build-arm64` (cross-compile ARM64) + +**test** group: `test` (unit tests), `docker-build` (build test image), `docker-test` (full integration test) + +**lint** group: `fmt` (format with nph), `fmt-check` (verify formatting) + +The Justfile uses just variables for DRY configuration: + +```just +bin := "bin/credenum" +src := "src/harvester.nim" +``` + +All build targets reference these variables, so changing the binary name or entry point requires editing one line. + +## Code Organization Principles + +**One file per concern.** Each collector is its own file. Types are separate from config. Output renderers are separate from each other. This means you can understand the browser collector by reading `browser.nim` alone. + +**Constants separate from logic.** All paths, patterns, thresholds, colors, and labels live in `config.nim`. Collectors import `config` to get their scan targets. This separation means you can audit every credential path the tool checks by reading one file. + +**Shared utilities in base, not duplicated.** Permission checking, file reading, finding construction, and redaction are in `base.nim`. No collector reimplements file stat calls or finding construction. + +**Types define the contract.** The `CollectorProc` type ensures every collector has the same signature. The `Report` type defines what output renderers receive. The `Severity` enum's ordering determines comparison behavior. These types are the architecture, enforced by the compiler. + +## Extending the Code + +### Adding a New Collector + +1. Add a new `Category` value in `types.nim` (e.g., `catContainer = "container"`) +2. 
Add module name and description to arrays in `config.nim` +3. Create `collectors/container.nim`: + - Start with `{.push raises: [].}` + - Import `../types`, `../config`, `base` + - Implement `proc collect*(config: HarvestConfig): CollectorResult` + - Use `newCollectorResult`, `makeFinding`, `makeFindingWithCred` from base.nim +4. Import and route in `runner.nim`: + - `import collectors/container` + - Add `of catContainer: container.collect` to the case statement +5. Add test fixtures in `tests/docker/planted/` and checks in `validate.sh` + +### Adding a New Scan Target to an Existing Collector + +1. Add the path constant to `config.nim` (e.g., `PodmanConfig* = ".config/containers/auth.json"`) +2. Add scanning logic to the relevant collector (e.g., `apptoken.nim`) +3. Add a test fixture and validation check + +### Adding a New Output Format + +1. Add a value to `OutputFormat` in `types.nim` +2. Create `output/sarif.nim` with a render proc +3. Add the case branch and CLI flag handling in `harvester.nim` + +## Dependencies + +The project uses only Nim's standard library. No external packages are required. + +| Import | Purpose | +|--------|---------| +| `std/parseopt` | CLI argument parsing | +| `std/os` | File operations, path manipulation, home directory | +| `std/posix` | POSIX stat for permission inspection | +| `std/strutils` | String operations (split, strip, find, contains) | +| `std/times` | Timestamp formatting | +| `std/monotimes` | Performance timing | +| `std/options` | Optional[Credential] for findings | +| `std/tables` | Credential metadata key-value pairs | +| `std/json` | JSON output construction | +| `std/unittest` | Test framework | + +Zero external dependencies means no supply chain risk, no version conflicts, and no network access needed to build. The entire project compiles from a fresh Nim installation. 
+ +## Next Steps + +- Read [04-CHALLENGES.md](./04-CHALLENGES.md) for extension ideas ranging from new collectors to remediation automation +- Try adding a new scan target to an existing collector. Start with something simple like adding Docker Compose credential detection to the apptoken collector diff --git a/PROJECTS/intermediate/credential-enumeration/learn/04-CHALLENGES.md b/PROJECTS/intermediate/credential-enumeration/learn/04-CHALLENGES.md new file mode 100644 index 00000000..3514617e --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/learn/04-CHALLENGES.md @@ -0,0 +1,390 @@ +# 04-CHALLENGES.md + +# Challenges + +Extension ideas organized by difficulty. Each challenge builds on the existing codebase and teaches specific security or engineering concepts. + +## Easy Challenges + +### 1. Add a Container Credentials Collector + +**What to build:** A new collector module that scans for container runtime credentials beyond what the apptoken collector already checks for Docker. + +**Why it's useful:** Container runtimes like Podman, containerd, and container orchestration tools store authentication and configuration data in the home directory. Podman uses `~/.config/containers/auth.json` for registry authentication. Buildah and Skopeo share the same credential store. Container-specific kubeconfig overrides may exist at `~/.config/containers/containers.conf`. 
+ +**Skills you'll practice:** +- Adding a new collector to the modular architecture +- Working with the Category enum and compiler-enforced exhaustive matching +- Understanding container runtime credential storage + +**Hints:** +- Follow the pattern: add Category value, add to config arrays, create collector file, wire up in runner +- Check `~/.config/containers/` for auth.json, registries.conf, and containers.conf +- Check for `~/.config/podman/` if it exists +- The Docker auth scanning logic in apptoken.nim is a good reference for registry auth parsing + +**How to test:** Add Podman/container fixture files to `tests/docker/planted/.config/containers/` and add checks to `validate.sh`. + +### 2. Add CSV Output Format + +**What to build:** A new output renderer that produces CSV with one row per finding. + +**Why it's useful:** CSV output integrates with spreadsheets, SIEM imports, and data analysis tools. Security teams often need to aggregate findings across multiple hosts into a single dataset. + +**Skills you'll practice:** +- Adding a new output format to the type system +- Working with the Report data structure +- Proper CSV escaping (fields containing commas, quotes, or newlines) + +**Hints:** +- Add `fmtCsv` to the `OutputFormat` enum +- Create `output/csv.nim` with columns: severity, category, path, permissions, modified, description +- Handle CSV escaping: wrap fields in quotes, double any internal quotes +- The JSON renderer is a good structural reference + +**How to test:** Run with `--format csv` and import into a spreadsheet. Verify all findings appear and special characters in descriptions don't break parsing. + +### 3. Add Severity Filtering + +**What to build:** A `--min-severity` flag that filters output to only show findings at or above a given severity level. + +**Why it's useful:** In CI pipelines, you might want to fail only on CRITICAL findings. In audit mode, you might want to see everything including INFO. 
The current tool shows all findings and only uses HIGH/CRITICAL for exit codes. + +**Skills you'll practice:** +- CLI argument parsing in Nim +- Filtering data after collection +- Working with ordered enums (Severity values have a natural ordering) + +**Hints:** +- Add a `minSeverity` field to `HarvestConfig` +- Parse `--min-severity critical|high|medium|low|info` in the CLI parser +- Filter findings in the runner after collection: `res.findings = res.findings.filterIt(it.severity >= config.minSeverity)` +- Update the summary calculation to only count filtered findings +- Also update the exit code logic to respect the filter + +### 4. Add Timestamp-Based Alerting + +**What to build:** A `--recent <N>` flag that highlights findings for files modified within the last N days. + +**Why it's useful:** During incident response, you care most about credentials that were recently accessed or modified. A `.git-credentials` file modified yesterday is more suspicious than one unchanged for two years. + +**Skills you'll practice:** +- Date arithmetic in Nim +- Adding visual indicators to the terminal renderer +- Contextual severity adjustment + +**Hints:** +- Parse `--recent 7` in the CLI parser +- In the terminal renderer, add a visual indicator (different color, prefix marker) for findings where the modification time is within the recent window +- Don't change the severity itself, just the visual presentation +- The `modified` field in Finding is already an ISO 8601 timestamp string + +## Intermediate Challenges + +### 5. Add SARIF Output Format + +**What to build:** Output in SARIF (Static Analysis Results Interchange Format), the standard format used by GitHub Code Scanning, Azure DevOps, and other security platforms. + +**Why it's useful:** SARIF is the industry standard for security tool output. Adding SARIF support means credenum results can be uploaded to GitHub Code Scanning, displayed in pull request annotations, and imported into security dashboards. 
+ +**Skills you'll practice:** +- Implementing an industry-standard output format +- Mapping domain-specific data (severity, category) to a standardized schema +- Working with nested JSON structures (SARIF is deeply nested) + +**Implementation approach:** +1. Study the SARIF 2.1.0 schema at https://docs.oasis-open.org/sarif/sarif/v2.1.0/ +2. Create `output/sarif.nim` +3. Map credenum concepts to SARIF: Finding → Result, Category → Rule, Severity → Level +4. SARIF severity levels are: error, warning, note, none. Map CRITICAL/HIGH → error, MEDIUM → warning, LOW/INFO → note +5. Each collector category becomes a "rule" with its own ID and description +6. The `physicalLocation` field uses the file path from each finding + +**How to test:** Upload the output to GitHub Code Scanning using `gh api` or validate against the SARIF schema using a JSON Schema validator. + +### 6. Add Remediation Suggestions + +**What to build:** For each finding, generate a specific remediation command or instruction. + +**Why it's useful:** Finding credentials is half the job. Telling the user exactly how to fix each issue makes the tool actionable rather than just informational. + +**Skills you'll practice:** +- Pattern matching on finding types to generate context-specific advice +- String templating with actual file paths and values +- Understanding proper credential hygiene practices + +**Implementation approach:** +1. Add a `remediation` field to the `Finding` type (or a parallel data structure) +2. After findings are collected, run a remediation pass that matches on category + description patterns +3. 
Generate specific commands: + - SSH key with bad permissions → `chmod 0600 /home/user/.ssh/id_rsa` + - Unencrypted SSH key → `ssh-keygen -p -f /home/user/.ssh/id_rsa` (adds passphrase) + - World-readable AWS credentials → `chmod 0600 /home/user/.aws/credentials` + - Secret in shell history → `sed -i 'Nd' /home/user/.bash_history` (line N) + - Plaintext .git-credentials → `git config --global credential.helper cache` (switch to cache) +4. Add remediation output to both terminal and JSON renderers +5. Consider a `--fix` flag that applies permission fixes automatically (with confirmation) + +**Extra credit:** Generate a shell script (`--remediate-script fix.sh`) that the user can review and execute. + +### 7. macOS Support + +**What to build:** Platform-specific collectors for macOS credential storage locations. + +**Why it's useful:** macOS stores credentials in different locations than Linux. Browser paths differ (`~/Library/Application Support/`), the Keychain replaces desktop keyrings, and cloud CLI tools may use different config directories. + +**Skills you'll practice:** +- Cross-platform filesystem handling +- Conditional compilation in Nim (`when defined(macosx)`) +- Understanding macOS security model (Keychain, TCC) + +**Implementation approach:** +1. Add platform detection (`when defined(linux)` vs `when defined(macosx)`) +2. Create platform-specific path constants in config.nim +3. For macOS browsers: + - Firefox: `~/Library/Application Support/Firefox/` + - Chrome: `~/Library/Application Support/Google/Chrome/` + - Safari: `~/Library/Cookies/`, `~/Library/Keychains/` +4. For macOS keychains: `~/Library/Keychains/login.keychain-db` +5. Cloud credentials use the same paths on both platforms +6. SSH directory is the same (`~/.ssh/`) +7. History files are the same (`~/.bash_history`, `~/.zsh_history`) +8. 
macOS-specific: `~/Library/Preferences/` plist files may contain tokens + +**Gotcha:** macOS Transparency, Consent, and Control (TCC) may block access to some directories (e.g., Safari data) unless the terminal has Full Disk Access. + +### 8. Watch Mode + +**What to build:** A `--watch` flag that re-scans at a configurable interval and reports new or changed findings. + +**Why it's useful:** Continuous monitoring catches credentials that appear after the initial scan. A developer pulls a `.env` file, creates a new SSH key, or configures a new cloud provider while the scanner is running in the background. + +**Skills you'll practice:** +- Event loops and periodic execution in Nim +- Diffing structured data (comparing finding sets between runs) +- Terminal refresh without flooding output + +**Implementation approach:** +1. Store the previous scan's findings as a set (keyed by path + category + description) +2. On each re-scan, compare new findings against the previous set +3. Report only new findings, removed findings, and changed severities +4. Use `--watch 30` for scan interval in seconds (default 60) +5. Clear and redraw the summary on each scan, append only new findings + +## Advanced Challenges + +### 9. Network Credential Scanning + +**What to build:** Extend the tool to scan for credentials exposed over network protocols: mounted network shares, NFS exports, SSHFS mounts, and SMB shares. + +**Why it's useful:** In enterprise environments, home directories are often NFS-mounted. Credentials on one machine may be accessible from any machine in the cluster. Network-mounted directories have different permission semantics (the NFS server may ignore local permission checks with `no_root_squash`). + +**Skills you'll practice:** +- Detecting mount points and their types (`/proc/mounts` on Linux) +- Understanding NFS permission models vs local filesystem permissions +- Network-aware scanning and timeout handling + +**Implementation approach:** +1. 
Parse `/proc/mounts` to identify NFS, CIFS, SSHFS, and other network mounts within the scan target +2. For network mounts, adjust severity: even `0600` permissions may be bypassed by the file server +3. Scan common network credential locations: `/etc/fstab` for stored mount credentials, `~/.smbcredentials` +4. Add a `--network` flag to enable this (disabled by default since it adds latency) +5. Add timeout handling for network paths that may be slow or unreachable + +### 10. Credential Age Analysis + +**What to build:** Track credential file age and flag credentials that haven't been rotated within a policy window. + +**Why it's useful:** A properly permissioned AWS credential file that hasn't been rotated in 18 months is still a risk. Many compliance frameworks (SOC 2, PCI DSS) require credential rotation. This extends the tool from "is it exposed?" to "is it stale?" + +**Skills you'll practice:** +- Date and time analysis against policy thresholds +- Configurable policy definitions +- Compliance mapping (SOC 2, PCI DSS rotation requirements) + +**Implementation approach:** +1. Add a `--max-age <days>` flag with a default of 90 days +2. For each finding with a modification timestamp, calculate the file age +3. If the credential file is older than the threshold, add a secondary finding or flag +4. Severity for stale credentials: modification time should not change severity, but add a `stale` indicator to the output +5. Consider reading Git history of files like `~/.aws/credentials` to determine when the content (not just metadata) last changed + +### 11. Agent Mode with Remote Reporting + +**What to build:** A daemon mode that runs credenum on a schedule, compares results against a baseline, and sends alerts to a remote endpoint when new findings appear. + +**Why it's useful:** Security teams managing fleets of developer machines need continuous visibility. Rather than running ad-hoc scans, agent mode provides ongoing monitoring with alerting. 
+ +**Skills you'll practice:** +- Building a long-running daemon in Nim +- Baseline management (storing and comparing scan results) +- HTTP client for pushing results to a webhook endpoint +- Systemd service file creation + +**Implementation approach:** +1. Add a `--agent` flag with `--interval <seconds>` and `--webhook <url>` +2. On first run, save the report as a baseline JSON file +3. On subsequent runs, diff the new report against the baseline +4. If new findings appear or existing findings change severity, POST the diff to the webhook +5. Support multiple webhook formats: generic JSON, Slack incoming webhook, PagerDuty events API +6. Write a systemd unit file for deployment as a system service +7. Add `--baseline <path>` for explicit baseline management + +**Testing strategy:** +- Unit test the diff logic (finding addition, removal, severity change) +- Integration test with a mock HTTP server that receives webhook payloads +- Test the daemon lifecycle (start, scan, sleep, re-scan, shutdown) + +## Expert Challenge + +### 12. Full Credential Lifecycle Platform + +**What to build:** A web dashboard that aggregates credenum results from multiple hosts, tracks credential exposure over time, provides fleet-wide visibility, and integrates with remediation workflows. + +**Prerequisites:** Familiarity with a web framework (FastAPI, Go's net/http, or similar), database design, and frontend basics. 
+ +**What you'll learn:** +- Security operations platform design +- Fleet-wide credential visibility +- Remediation workflow management +- Dashboard design for security operations + +**High-level architecture:** + +``` +┌─────────────┐ ┌─────────────┐ ┌─────────────┐ +│ Host A │ │ Host B │ │ Host C │ +│ credenum │ │ credenum │ │ credenum │ +│ --agent │ │ --agent │ │ --agent │ +└──────┬──────┘ └──────┬──────┘ └──────┬──────┘ + │ │ │ + └────────────────┼────────────────┘ + │ HTTPS/webhook + ▼ + ┌─────────────────┐ + │ Aggregation │ + │ API Server │ + ├─────────────────┤ + │ PostgreSQL │ + │ (findings, │ + │ baselines, │ + │ hosts) │ + └────────┬────────┘ + │ + ▼ + ┌─────────────────┐ + │ Dashboard │ + │ - Fleet view │ + │ - Host detail │ + │ - Trend charts │ + │ - Remediation │ + │ tracking │ + └─────────────────┘ +``` + +**Phased implementation:** + +Phase 1 - Foundation: +- Define the database schema (hosts, scans, findings, baselines) +- Build the API endpoint that receives scan results +- Store and deduplicate findings + +Phase 2 - Core features: +- Fleet overview page: hosts by risk score, worst offenders, recent changes +- Host detail page: full finding list, history, severity trend +- Comparison view: diff between scans +- Search and filter across all findings + +Phase 3 - Integration: +- Remediation workflow: assign findings to owners, track resolution, verify fixes +- Alert rules: new CRITICAL finding triggers PagerDuty/Slack/email +- Compliance view: map findings to SOC 2 / PCI DSS controls +- Export: generate compliance reports in PDF/CSV + +Phase 4 - Polish: +- Host grouping (by team, environment, role) +- SLA tracking (time from detection to remediation) +- API tokens for automation +- Role-based access control + +**Success criteria:** +- [ ] Agents on 3+ test hosts successfully push results to the API +- [ ] Dashboard shows fleet-level risk summary +- [ ] Finding diffs correctly detect new, removed, and changed findings +- [ ] Remediation workflow 
tracks at least one finding from detection to resolution +- [ ] Trend charts show exposure changes over time + +## Mix and Match + +Combine challenges for larger projects: + +- **Challenges 3 + 6:** Severity filtering with remediation suggestions. Filter to HIGH+, generate a remediation script for just the urgent findings. +- **Challenges 5 + 10:** SARIF output with credential age annotations. Upload to GitHub and flag stale credentials as code scanning alerts. +- **Challenges 8 + 11:** Watch mode that feeds into agent mode. Local real-time monitoring with remote alerting. +- **Challenges 7 + 9:** macOS support plus network credential scanning. Cover both local and network-mounted credentials on both platforms. + +## Performance Challenges + +### Benchmark the Collectors + +Profile each collector's execution time across different home directory sizes. The current timing data per module is a start, but deeper profiling reveals bottlenecks. + +**What to measure:** +- Time per file system call (stat, readFile, walkDir) +- Memory allocation per finding +- Scaling behavior: how does scan time change with 10 vs 100 vs 1000 files in .ssh/? +- Impact of depth limits on history and keyring scanning + +**Tools:** Nim's `--profiler:on` flag, `nimprof`, or custom timing with `getMonoTime()`. + +### Optimize for Large Home Directories + +Developer home directories on shared servers can be massive (100GB+, millions of files). The recursive .env scanner and KeePass scanner will be the bottleneck. + +**Ideas:** +- Use `inotify` to watch for file changes instead of periodic full scans +- Build a file index on first scan and use modification times to skip unchanged files +- Parallelize the recursive walks across different top-level directories + +## Security Challenges + +### Add False Positive Suppression + +Build a `.credenum-ignore` file format that lets users mark known-safe findings (e.g., a test SSH key that's intentionally world-readable). 
+ +**Requirements:** +- Support path-based ignores (`~/.ssh/test_key`) +- Support pattern-based ignores (`*.test.kdbx`) +- Support category-based ignores (`[ssh] id_rsa_test`) +- The ignore file itself should be checked for proper permissions + +### Add Integrity Checking + +Hash credential files and compare against a known-good baseline. If a credential file's content hash changes without an expected rotation event, flag it as potentially tampered. + +### Compliance Mapping + +Map each finding type to specific compliance framework controls: +- SOC 2 CC6.1 (Logical and Physical Access Controls) +- PCI DSS 8.2 (Authentication Policies) +- CIS Controls 5.2 (Use Unique Passwords) +- NIST 800-53 IA-5 (Authenticator Management) + +Output a compliance-focused report that shows which controls have findings. + +## Getting Help + +**Debugging the scanner:** Run with `--verbose` to see all scanned paths including modules with zero findings. This helps identify if a module is looking in the wrong directory or if the target path is incorrect. + +**Debugging a collector:** Add temporary `echo` statements before submitting the finding. Nim's stdlib `echo` works in `{.push raises: [].}` files without wrapping in try/except because `echo` is treated specially by the compiler. + +**Debugging the test suite:** Run `just test` for unit tests. If a specific test fails, the test output shows which `check` assertion failed and the expected vs actual values. For integration tests, `docker run --rm -it credenum-test bash` gives you a shell in the test container where you can run credenum manually. 
+ +**Understanding Nim:** If you're new to Nim, the key concepts that appear in this codebase are: +- Procedures (`proc`) are functions +- `result` is an implicit return variable (equivalent to assigning to the function name) +- `{.push raises: [].}` is a compiler pragma that enforces no-exception contracts +- `{.cast(raises: []).}` overrides the raises check for a specific block +- `seq[T]` is a dynamic array, `array[N, T]` is fixed-size +- `Option[T]` is Nim's Maybe/Optional type (from `std/options`) +- `Table[K, V]` is a hash map (from `std/tables`) diff --git a/PROJECTS/intermediate/credential-enumeration/src/collectors/apptoken.nim b/PROJECTS/intermediate/credential-enumeration/src/collectors/apptoken.nim new file mode 100644 index 00000000..933d0ad4 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/src/collectors/apptoken.nim @@ -0,0 +1,294 @@ +# ©AngelaMos | 2026 +# apptoken.nim + +{.push raises: [].} + +import std/[strutils, tables, monotimes, times] +import ../types +import ../config +import base + +type + AppTarget = object + path: string + name: string + description: string + isDir: bool + +proc scanAppDir( + config: HarvestConfig, + target: AppTarget, + result: var CollectorResult +) = + let fullPath = expandHome(config, target.path) + if target.isDir: + if not safeDirExists(fullPath): + return + let sev = permissionSeverity(fullPath, isDir = true) + result.findings.add(makeFinding( + fullPath, + target.description, + catApptoken, sev + )) + else: + if not safeFileExists(fullPath): + return + let sev = if isWorldReadable(fullPath): svCritical + elif isGroupReadable(fullPath): svHigh + else: svMedium + result.findings.add(makeFinding( + fullPath, + target.description, + catApptoken, sev + )) + +proc scanDbCredFiles(config: HarvestConfig, result: var CollectorResult) = + let pgpassPath = expandHome(config, PgPass) + if safeFileExists(pgpassPath): + let lines = readFileLines(pgpassPath) + var entryCount = 0 + for line in lines: + if 
line.strip().len > 0 and not line.strip().startsWith("#"): + inc entryCount + + let sev = if isWorldReadable(pgpassPath): svCritical else: svHigh + + var cred = Credential( + source: pgpassPath, + credType: "postgresql_credentials", + preview: $entryCount & " database connection entries", + metadata: initTable[string, string]() + ) + cred.setMeta("entry_count", $entryCount) + + result.findings.add(makeFindingWithCred( + pgpassPath, + "PostgreSQL password file with " & $entryCount & " entries", + catApptoken, sev, cred + )) + + let mycnfPath = expandHome(config, MyCnf) + if safeFileExists(mycnfPath): + let content = readFileContent(mycnfPath) + let hasPassword = "password" in content.toLowerAscii() + let sev = if isWorldReadable(mycnfPath): svCritical + elif hasPassword: svHigh + else: svMedium + + result.findings.add(makeFinding( + mycnfPath, + "MySQL configuration" & (if hasPassword: " (contains password)" else: ""), + catApptoken, sev + )) + + let redisPath = expandHome(config, RedisConf) + if safeFileExists(redisPath): + let sev = if isWorldReadable(redisPath): svCritical else: svHigh + result.findings.add(makeFinding( + redisPath, + "Redis CLI authentication file", + catApptoken, sev + )) + + let mongoPath = expandHome(config, MongoRc) + if safeFileExists(mongoPath): + let content = readFileContent(mongoPath) + let hasCreds = "password" in content.toLowerAscii() or + "auth" in content.toLowerAscii() + let sev = if isWorldReadable(mongoPath): svCritical + elif hasCreds: svHigh + else: svMedium + + result.findings.add(makeFinding( + mongoPath, + "MongoDB RC file" & (if hasCreds: " (may contain credentials)" else: ""), + catApptoken, sev + )) + +proc scanNetrc(config: HarvestConfig, result: var CollectorResult) = + let path = expandHome(config, NetrcFile) + if not safeFileExists(path): + return + + let content = readFileContent(path) + let lines = content.splitLines() + var machineCount = 0 + var hasPassword = false + + for line in lines: + let stripped = 
line.strip() + if stripped.toLowerAscii().startsWith("machine "): + inc machineCount + if "password " in stripped.toLowerAscii(): + hasPassword = true + + let sev = if isWorldReadable(path): svCritical + elif hasPassword: svHigh + else: svMedium + + var cred = Credential( + source: path, + credType: "netrc_credentials", + preview: $machineCount & " machine entries", + metadata: initTable[string, string]() + ) + cred.setMeta("machines", $machineCount) + cred.setMeta("has_password", $hasPassword) + + result.findings.add(makeFindingWithCred( + path, + "Netrc credential file with " & $machineCount & " entries", + catApptoken, sev, cred + )) + +proc scanDevTokenFiles(config: HarvestConfig, result: var CollectorResult) = + let npmrcPath = expandHome(config, NpmrcFile) + if safeFileExists(npmrcPath): + let content = readFileContent(npmrcPath) + let hasToken = "_authToken" in content or "_auth" in content + let sev = if isWorldReadable(npmrcPath): svCritical + elif hasToken: svHigh + else: svInfo + + if hasToken: + result.findings.add(makeFinding( + npmrcPath, + "npm registry authentication token", + catApptoken, sev + )) + + let pypircPath = expandHome(config, PypircFile) + if safeFileExists(pypircPath): + let content = readFileContent(pypircPath) + let hasPassword = "password" in content.toLowerAscii() + let sev = if isWorldReadable(pypircPath): svCritical + elif hasPassword: svHigh + else: svMedium + + result.findings.add(makeFinding( + pypircPath, + "PyPI configuration" & (if hasPassword: " (contains credentials)" else: ""), + catApptoken, sev + )) + + let ghPath = expandHome(config, GhCliHosts) + if safeFileExists(ghPath): + let content = readFileContent(ghPath) + let hasOauth = "oauth_token" in content.toLowerAscii() + let sev = if isWorldReadable(ghPath): svCritical + elif hasOauth: svHigh + else: svMedium + + result.findings.add(makeFinding( + ghPath, + "GitHub CLI OAuth token", + catApptoken, sev + )) + +proc scanInfraTokenFiles(config: HarvestConfig, result: var 
CollectorResult) = + let tfPath = expandHome(config, TerraformCreds) + if safeFileExists(tfPath): + let content = readFileContent(tfPath) + let hasToken = "token" in content.toLowerAscii() + let sev = if isWorldReadable(tfPath): svCritical + elif hasToken: svHigh + else: svMedium + + result.findings.add(makeFinding( + tfPath, + "Terraform Cloud API token", + catApptoken, sev + )) + + let vaultPath = expandHome(config, VaultTokenFile) + if safeFileExists(vaultPath): + let sev = if isWorldReadable(vaultPath): svCritical else: svHigh + + result.findings.add(makeFinding( + vaultPath, + "HashiCorp Vault token", + catApptoken, sev + )) + + let helmPath = expandHome(config, HelmRepos) + if safeFileExists(helmPath): + let content = readFileContent(helmPath) + let hasPassword = "password" in content.toLowerAscii() + let sev = if isWorldReadable(helmPath): svCritical + elif hasPassword: svHigh + else: svInfo + + if hasPassword: + result.findings.add(makeFinding( + helmPath, + "Helm repository credentials", + catApptoken, sev + )) + + let rclonePath = expandHome(config, RcloneConf) + if safeFileExists(rclonePath): + let content = readFileContent(rclonePath) + let hasCreds = "pass" in content.toLowerAscii() or + "token" in content.toLowerAscii() or + "key" in content.toLowerAscii() + let sev = if isWorldReadable(rclonePath): svCritical + elif hasCreds: svHigh + else: svMedium + + result.findings.add(makeFinding( + rclonePath, + "Rclone cloud storage configuration" & + (if hasCreds: " (contains credentials)" else: ""), + catApptoken, sev + )) + +proc scanDockerConfig(config: HarvestConfig, result: var CollectorResult) = + let dockerPath = expandHome(config, DockerConfig) + if not safeFileExists(dockerPath): + return + + let content = readFileContent(dockerPath) + let hasAuth = "\"auth\"" in content or "\"auths\"" in content + let sev = if isWorldReadable(dockerPath): svCritical + elif hasAuth: svHigh + else: svMedium + + var cred = Credential( + source: dockerPath, + credType: 
"docker_registry_auth", + preview: if hasAuth: "Registry authentication tokens present" else: "No auth data", + metadata: initTable[string, string]() + ) + + result.findings.add(makeFindingWithCred( + dockerPath, + "Docker configuration" & (if hasAuth: " with registry auth tokens" else: ""), + catApptoken, sev, cred + )) + +proc collect*(config: HarvestConfig): CollectorResult = + result = newCollectorResult("apptoken", catApptoken) + let start = getMonoTime() + + let appTargets = [ + AppTarget(path: SlackDir, name: "Slack", + description: "Slack desktop session data", isDir: true), + AppTarget(path: DiscordDir, name: "Discord", + description: "Discord desktop session data", isDir: true), + AppTarget(path: VsCodeDir, name: "VS Code", + description: "VS Code configuration directory", isDir: true), + AppTarget(path: VsCodeUserSettings, name: "VS Code Settings", + description: "VS Code user settings (may contain tokens)", isDir: false) + ] + + for target in appTargets: + scanAppDir(config, target, result) + + scanDbCredFiles(config, result) + scanDockerConfig(config, result) + scanNetrc(config, result) + scanDevTokenFiles(config, result) + scanInfraTokenFiles(config, result) + + let elapsed = getMonoTime() - start + result.durationMs = elapsed.inMilliseconds diff --git a/PROJECTS/intermediate/credential-enumeration/src/collectors/base.nim b/PROJECTS/intermediate/credential-enumeration/src/collectors/base.nim new file mode 100644 index 00000000..f6e3f995 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/src/collectors/base.nim @@ -0,0 +1,161 @@ +# ©AngelaMos | 2026 +# base.nim + +{.push raises: [].} + +import std/[os, posix, strutils, times, options, tables] +import ../types +import ../config + +proc getPermsString*(path: string): string = + var statBuf: Stat + try: + if stat(path.cstring, statBuf) == 0: + let mode = statBuf.st_mode and 0o7777 + result = "0" & toOct(mode.int, 3) + else: + result = "unknown" + except CatchableError: + result = "unknown" + 
+proc getModifiedTime*(path: string): string = + try: + let info = getFileInfo(path) + result = $info.lastWriteTime.utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + except CatchableError: + result = "unknown" + +proc getFileSizeBytes*(path: string): int64 = + try: + result = os.getFileSize(path) + except CatchableError: + result = -1 + +proc isWorldReadable*(path: string): bool = + var statBuf: Stat + try: + if stat(path.cstring, statBuf) == 0: + result = (statBuf.st_mode.int and WorldReadBit) != 0 + except CatchableError: + discard + +proc isGroupReadable*(path: string): bool = + var statBuf: Stat + try: + if stat(path.cstring, statBuf) == 0: + result = (statBuf.st_mode.int and GroupReadBit) != 0 + except CatchableError: + discard + +proc getNumericPerms*(path: string): int = + var statBuf: Stat + try: + if stat(path.cstring, statBuf) == 0: + result = statBuf.st_mode.int and 0o7777 + except CatchableError: + result = -1 + +proc expandHome*(config: HarvestConfig, subpath: string): string = + result = config.targetDir / subpath + +proc safeFileExists*(path: string): bool = + try: + result = os.fileExists(path) + except CatchableError: + result = false + +proc safeDirExists*(path: string): bool = + try: + result = os.dirExists(path) + except CatchableError: + result = false + +proc readFileContent*(path: string): string = + try: + result = readFile(path) + except CatchableError: + result = "" + +proc readFileLines*(path: string, maxLines: int = -1): seq[string] = + try: + let content = readFile(path) + let lines = content.splitLines() + if maxLines > 0 and lines.len > maxLines: + result = lines[0 ..< maxLines] + else: + result = lines + except CatchableError: + result = @[] + +proc matchesExclude*(path: string, patterns: seq[string]): bool = + let name = path.extractFilename() + for pattern in patterns: + if pattern == name or ("/" & pattern & "/") in path: + return true + +proc makeFinding*( + path: string, + description: string, + category: Category, + severity: Severity 
+): Finding = + Finding( + path: path, + category: category, + severity: severity, + description: description, + credential: none(Credential), + permissions: getPermsString(path), + modified: getModifiedTime(path), + size: getFileSizeBytes(path) + ) + +proc makeFindingWithCred*( + path: string, + description: string, + category: Category, + severity: Severity, + cred: Credential +): Finding = + Finding( + path: path, + category: category, + severity: severity, + description: description, + credential: some(cred), + permissions: getPermsString(path), + modified: getModifiedTime(path), + size: getFileSizeBytes(path) + ) + +proc newCollectorResult*(name: string, category: Category): CollectorResult = + CollectorResult( + name: name, + category: category, + findings: @[], + durationMs: 0, + errors: @[] + ) + +proc permissionSeverity*(path: string, isDir: bool = false): Severity = + let perms = getNumericPerms(path) + if perms < 0: + return svInfo + if (perms and WorldReadBit) != 0: + return svCritical + if (perms and GroupReadBit) != 0: + return svMedium + let expected = if isDir: OwnerOnlyDirPerms else: OwnerOnlyFilePerms + if perms > expected: + return svLow + result = svInfo + +proc setMeta*(cred: var Credential, key: string, val: string) = + {.cast(raises: []).}: + cred.metadata[key] = val + +proc redactValue*(value: string, showChars: int = 4): string = + if value.len <= showChars: + result = "*".repeat(value.len) + else: + result = value[0 ..< showChars] & "*".repeat(value.len - showChars) diff --git a/PROJECTS/intermediate/credential-enumeration/src/collectors/browser.nim b/PROJECTS/intermediate/credential-enumeration/src/collectors/browser.nim new file mode 100644 index 00000000..0efe472b --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/src/collectors/browser.nim @@ -0,0 +1,104 @@ +# ©AngelaMos | 2026 +# browser.nim + +{.push raises: [].} + +import std/[os, strutils, monotimes, times] +import ../types +import ../config +import base + +proc 
scanFirefox(config: HarvestConfig, result: var CollectorResult) = + let firefoxPath = expandHome(config, FirefoxDir) + if not safeDirExists(firefoxPath): + return + + let profilesIniPath = firefoxPath / FirefoxProfilesIni + if not safeFileExists(profilesIniPath): + return + + let lines = readFileLines(profilesIniPath) + var profiles: seq[string] = @[] + var currentPath = "" + + for line in lines: + let stripped = line.strip() + if stripped.startsWith("[Profile"): + if currentPath.len > 0: + profiles.add(currentPath) + currentPath = "" + + if stripped.toLowerAscii().startsWith("path="): + currentPath = stripped.split("=", maxsplit = 1)[1] + + if currentPath.len > 0: + profiles.add(currentPath) + + for profile in profiles: + let profileDir = if profile.startsWith("/"): profile + else: firefoxPath / profile + + if not safeDirExists(profileDir): + continue + + let credFiles = [ + (FirefoxLoginsFile, "Firefox stored logins database"), + (FirefoxCookiesDb, "Firefox cookies database"), + (FirefoxKeyDb, "Firefox key database") + ] + + for (fileName, desc) in credFiles: + let filePath = profileDir / fileName + if safeFileExists(filePath): + let sev = if isWorldReadable(filePath): svCritical + elif isGroupReadable(filePath): svHigh + else: svMedium + + result.findings.add(makeFinding(filePath, desc, catBrowser, sev)) + +proc scanChromium(config: HarvestConfig, result: var CollectorResult) = + for chromiumDir in ChromiumDirs: + let basePath = expandHome(config, chromiumDir) + if not safeDirExists(basePath): + continue + + let browserName = chromiumDir.split("/")[^1] + + let defaultProfile = basePath / "Default" + var profileDirs: seq[string] = @[] + + if safeDirExists(defaultProfile): + profileDirs.add(defaultProfile) + + try: + for kind, path in walkDir(basePath): + if kind == pcDir and path.extractFilename().startsWith("Profile "): + profileDirs.add(path) + except CatchableError as e: + result.errors.add("Error walking " & browserName & " profiles: " & e.msg) + + for 
profileDir in profileDirs: + let credFiles = [ + (ChromiumLoginData, browserName & " stored login database"), + (ChromiumCookies, browserName & " cookies database"), + (ChromiumWebData, browserName & " web data (autofill, payment methods)") + ] + + for (fileName, desc) in credFiles: + let filePath = profileDir / fileName + if safeFileExists(filePath): + let sev = if isWorldReadable(filePath): svCritical + elif isGroupReadable(filePath): svHigh + else: svMedium + + result.findings.add(makeFinding(filePath, desc, catBrowser, sev)) + +proc collect*(config: HarvestConfig): CollectorResult = + result = newCollectorResult("browser", catBrowser) + let start = getMonoTime() + + scanFirefox(config, result) + scanChromium(config, result) + + let elapsed = getMonoTime() - start + result.durationMs = elapsed.inMilliseconds diff --git a/PROJECTS/intermediate/credential-enumeration/src/collectors/cloud.nim b/PROJECTS/intermediate/credential-enumeration/src/collectors/cloud.nim new file mode 100644 index 00000000..13efdd75 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/src/collectors/cloud.nim @@ -0,0 +1,217 @@ +# ©AngelaMos | 2026 +# cloud.nim + +{.push raises: [].} + +import std/[os, strutils, tables, monotimes, times] +import ../types +import ../config +import base + +proc scanAws(config: HarvestConfig, result: var CollectorResult) = + let credPath = expandHome(config, AwsCredentials) + let configPath = expandHome(config, AwsConfig) + + if safeFileExists(credPath): + let content = readFileContent(credPath) + let lines = content.splitLines() + var profileCount = 0 + var staticKeys = 0 + var sessionKeys = 0 + + for line in lines: + let stripped = line.strip() + if stripped.startsWith("["): + inc profileCount + if stripped.toLowerAscii().startsWith("aws_access_key_id"): + let parts = stripped.split("=", maxsplit = 1) + if parts.len == 2: + let keyVal = parts[1].strip() + if keyVal.startsWith(AwsStaticKeyPrefix): + inc staticKeys + elif 
keyVal.startsWith(AwsSessionKeyPrefix): + inc sessionKeys + + var sev = svMedium + if staticKeys > 0: + sev = svHigh + if isWorldReadable(credPath): + sev = svCritical + + var cred = Credential( + source: credPath, + credType: "aws_credentials", + preview: $profileCount & " profiles, " & $staticKeys & " static keys", + metadata: initTable[string, string]() + ) + cred.setMeta("profiles", $profileCount) + cred.setMeta("static_keys", $staticKeys) + cred.setMeta("session_keys", $sessionKeys) + + result.findings.add(makeFindingWithCred( + credPath, + "AWS credentials file: " & $profileCount & " profiles, " & + $staticKeys & " static keys, " & $sessionKeys & " session keys", + catCloud, sev, cred + )) + + if safeFileExists(configPath): + let lines = readFileLines(configPath) + var profileCount = 0 + var hasSso = false + var hasMfa = false + + for line in lines: + let stripped = line.strip() + if stripped.startsWith("["): + inc profileCount + if "sso_" in stripped.toLowerAscii(): + hasSso = true + if "mfa_serial" in stripped.toLowerAscii(): + hasMfa = true + + var desc = "AWS config: " & $profileCount & " profiles" + if hasSso: + desc &= ", SSO configured" + if hasMfa: + desc &= ", MFA configured" + + result.findings.add(makeFinding(configPath, desc, catCloud, svInfo)) + +proc scanGcp(config: HarvestConfig, result: var CollectorResult) = + let gcpDir = expandHome(config, GcpConfigDir) + let adcPath = expandHome(config, GcpAppDefaultCreds) + + if safeFileExists(adcPath): + let content = readFileContent(adcPath) + let isServiceAccount = GcpServiceAccountPattern in content.toLowerAscii() + let sev = if isServiceAccount: svHigh else: svMedium + + var cred = Credential( + source: adcPath, + credType: "gcp_credentials", + preview: if isServiceAccount: "Service account key" else: "User credentials", + metadata: initTable[string, string]() + ) + let credTypeStr = if isServiceAccount: "service_account" else: "authorized_user" + cred.setMeta("type", credTypeStr) + + 
result.findings.add(makeFindingWithCred( + adcPath, + "GCP application default credentials (" & credTypeStr & ")", + catCloud, sev, cred + )) + + if safeDirExists(gcpDir): + try: + for kind, path in walkDir(gcpDir): + if kind != pcFile: + continue + if path.endsWith(".json") and path != adcPath: + let content = readFileContent(path) + if GcpServiceAccountPattern in content.toLowerAscii(): + result.findings.add(makeFinding( + path, + "GCP service account key file", + catCloud, svHigh + )) + except CatchableError as e: + result.errors.add("Error scanning GCP directory: " & e.msg) + +proc scanAzure(config: HarvestConfig, result: var CollectorResult) = + let azDir = expandHome(config, AzureDir) + if not safeDirExists(azDir): + return + + let tokenPaths = [ + expandHome(config, AzureAccessTokens), + expandHome(config, AzureMsalTokenCache) + ] + + var foundTokens = false + for path in tokenPaths: + if safeFileExists(path): + foundTokens = true + let sev = if isWorldReadable(path): svCritical else: svMedium + result.findings.add(makeFinding( + path, + "Azure token cache", + catCloud, sev + )) + + if not foundTokens: + result.findings.add(makeFinding( + azDir, + "Azure CLI configuration directory", + catCloud, svInfo + )) + +proc scanKubernetes(config: HarvestConfig, result: var CollectorResult) = + let kubePath = expandHome(config, KubeConfig) + if not safeFileExists(kubePath): + return + + let content = readFileContent(kubePath) + let lines = content.splitLines() + var contextCount = 0 + var userCount = 0 + var hasTokenAuth = false + var hasCertAuth = false + + var inContexts = false + var inUsers = false + + for line in lines: + let stripped = line.strip() + if stripped == KubeContextMarker: + inContexts = true + inUsers = false + elif stripped == KubeUserMarker: + inUsers = true + inContexts = false + elif stripped.len > 0 and not stripped.startsWith(" ") and + not stripped.startsWith("-"): + inContexts = false + inUsers = false + + if inContexts and 
stripped.startsWith("- context:"): + inc contextCount + if inUsers and stripped.startsWith("- name:"): + inc userCount + if "token:" in stripped: + hasTokenAuth = true + if "client-certificate-data:" in stripped: + hasCertAuth = true + + let sev = if isWorldReadable(kubePath): svCritical + elif hasTokenAuth: svHigh + else: svMedium + + var cred = Credential( + source: kubePath, + credType: "kubernetes_config", + preview: $contextCount & " contexts, " & $userCount & " users", + metadata: initTable[string, string]() + ) + cred.setMeta("contexts", $contextCount) + cred.setMeta("users", $userCount) + cred.setMeta("token_auth", $hasTokenAuth) + cred.setMeta("cert_auth", $hasCertAuth) + + result.findings.add(makeFindingWithCred( + kubePath, + "Kubernetes config: " & $contextCount & " contexts, " & $userCount & " users", + catCloud, sev, cred + )) + +proc collect*(config: HarvestConfig): CollectorResult = + result = newCollectorResult("cloud", catCloud) + let start = getMonoTime() + + scanAws(config, result) + scanGcp(config, result) + scanAzure(config, result) + scanKubernetes(config, result) + + let elapsed = getMonoTime() - start + result.durationMs = elapsed.inMilliseconds diff --git a/PROJECTS/intermediate/credential-enumeration/src/collectors/git.nim b/PROJECTS/intermediate/credential-enumeration/src/collectors/git.nim new file mode 100644 index 00000000..12e9d7e4 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/src/collectors/git.nim @@ -0,0 +1,137 @@ +# ©AngelaMos | 2026 +# git.nim + +{.push raises: [].} + +import std/[strutils, tables, monotimes, times] +import ../types +import ../config +import base + +proc scanGitCredentials(config: HarvestConfig, result: var CollectorResult) = + let credPath = expandHome(config, GitCredentials) + if not safeFileExists(credPath): + return + + let lines = readFileLines(credPath) + var credCount = 0 + + for line in lines: + let stripped = line.strip() + if stripped.len > 0 and "://" in stripped: + inc credCount + 
+ if credCount == 0: + return + + var cred = Credential( + source: credPath, + credType: "git_plaintext_credentials", + preview: $credCount & " stored credentials", + metadata: initTable[string, string]() + ) + cred.setMeta("count", $credCount) + cred.setMeta("permissions", getPermsString(credPath)) + + let sev = if isWorldReadable(credPath): svCritical else: svHigh + + result.findings.add(makeFindingWithCred( + credPath, + "Plaintext Git credential store with " & $credCount & " entries", + catGit, sev, cred + )) + +proc scanGitConfig(config: HarvestConfig, result: var CollectorResult) = + let paths = [ + expandHome(config, GitConfig), + expandHome(config, GitConfigLocal) + ] + + for path in paths: + if not safeFileExists(path): + continue + + let content = readFileContent(path) + if content.len == 0: + continue + + let lines = content.splitLines() + var inCredentialSection = false + var helperValue = "" + + for line in lines: + let stripped = line.strip() + if stripped.startsWith("["): + inCredentialSection = stripped.toLowerAscii().startsWith("[credential") + + if inCredentialSection and stripped.toLowerAscii().startsWith("helper"): + let parts = stripped.split("=", maxsplit = 1) + if parts.len == 2: + helperValue = parts[1].strip() + + if helperValue.len > 0: + let sev = if helperValue == "store": svMedium else: svInfo + result.findings.add(makeFinding( + path, + "Git credential helper configured: " & helperValue, + catGit, sev + )) + +proc scanTokenPatterns(config: HarvestConfig, result: var CollectorResult) = + let configPaths = [ + expandHome(config, GitConfig), + expandHome(config, GitConfigLocal) + ] + + for path in configPaths: + if not safeFileExists(path): + continue + + let content = readFileContent(path) + if content.len == 0: + continue + + for pattern in GitHubTokenPatterns: + let idx = content.find(pattern) + if idx >= 0: + let tokenStart = content[idx ..< min(idx + 20, content.len)] + var cred = Credential( + source: path, + credType: 
"github_token", + preview: redactValue(tokenStart, 8), + metadata: initTable[string, string]() + ) + result.findings.add(makeFindingWithCred( + path, + "GitHub personal access token detected", + catGit, svHigh, cred + )) + break + + for pattern in GitLabTokenPatterns: + let idx = content.find(pattern) + if idx >= 0: + let tokenStart = content[idx ..< min(idx + 20, content.len)] + var cred = Credential( + source: path, + credType: "gitlab_token", + preview: redactValue(tokenStart, 8), + metadata: initTable[string, string]() + ) + result.findings.add(makeFindingWithCred( + path, + "GitLab personal access token detected", + catGit, svHigh, cred + )) + break + +proc collect*(config: HarvestConfig): CollectorResult = + result = newCollectorResult("git", catGit) + let start = getMonoTime() + + scanGitCredentials(config, result) + scanGitConfig(config, result) + scanTokenPatterns(config, result) + + let elapsed = getMonoTime() - start + result.durationMs = elapsed.inMilliseconds diff --git a/PROJECTS/intermediate/credential-enumeration/src/collectors/history.nim b/PROJECTS/intermediate/credential-enumeration/src/collectors/history.nim new file mode 100644 index 00000000..d6eacfdf --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/src/collectors/history.nim @@ -0,0 +1,163 @@ +# ©AngelaMos | 2026 +# history.nim + +{.push raises: [].} + +import std/[os, strutils, tables, monotimes, times] +import ../types +import ../config +import base + +const + MaxHistoryLines = 50000 + MaxEnvDepth = 5 + +proc redactLine*(line: string): string = + let eqIdx = line.find('=') + if eqIdx < 0: + return line + let key = line[0 ..< eqIdx] + let valStart = eqIdx + 1 + if valStart >= line.len: + return line + let value = line[valStart .. 
^1].strip() + let cleanValue = if (value.startsWith("\"") and value.endsWith("\"")) or + (value.startsWith("'") and value.endsWith("'")): + value[1 ..< ^1] + else: + value + result = key & "=" & redactValue(cleanValue, 4) + +proc matchesSecretPattern*(line: string): bool = + let upper = line.toUpperAscii() + for pattern in SecretPatterns: + if pattern in upper: + if "export " in line.toLowerAscii() or + line.strip().startsWith(pattern.split("=")[0]): + return true + +proc matchesCommandPattern*(line: string): bool = + let lower = line.toLowerAscii() + for pattern in HistoryCommandPatterns: + let parts = pattern.split(".*") + if parts.len >= 2: + var allFound = true + var searchFrom = 0 + for part in parts: + let idx = lower.find(part, start = searchFrom) + if idx < 0: + allFound = false + break + searchFrom = idx + part.len + if allFound: + return true + elif pattern in lower: + return true + +proc scanHistoryFile( + config: HarvestConfig, + fileName: string, + result: var CollectorResult +) = + let path = expandHome(config, fileName) + if not safeFileExists(path): + return + + let lines = readFileLines(path, MaxHistoryLines) + var secretCount = 0 + var commandCount = 0 + + for i, line in lines: + let stripped = line.strip() + if stripped.len == 0: + continue + + if matchesSecretPattern(stripped): + inc secretCount + if secretCount <= 20: + var cred = Credential( + source: path, + credType: "history_secret", + preview: redactLine(stripped), + metadata: initTable[string, string]() + ) + cred.setMeta("line_region", $(i + 1)) + + result.findings.add(makeFindingWithCred( + path, + "Secret in shell history (line ~" & $(i + 1) & ")", + catHistory, svHigh, cred + )) + + elif matchesCommandPattern(stripped): + inc commandCount + if commandCount <= 10: + let preview = if stripped.len > 60: stripped[0 ..< 60] & "..." 
+ else: stripped + + result.findings.add(makeFinding( + path, + "Sensitive command in history: " & preview, + catHistory, svMedium + )) + + if secretCount > 20: + result.findings.add(makeFinding( + path, + $secretCount & " total secret patterns found (showing first 20)", + catHistory, svInfo + )) + +proc walkForEnv( + dir: string, + depth: int, + excludePatterns: seq[string], + result: var CollectorResult +) = + if depth > MaxEnvDepth: + return + try: + for kind, path in walkDir(dir): + if matchesExclude(path, excludePatterns): + continue + case kind + of pcFile: + let name = path.extractFilename() + for envPattern in EnvFilePatterns: + if name == envPattern: + let sev = if isWorldReadable(path): svCritical + elif isGroupReadable(path): svHigh + else: svMedium + result.findings.add(makeFinding( + path, + "Environment file: " & name, + catHistory, sev + )) + break + of pcDir: + let dirName = path.extractFilename() + if dirName.startsWith(".") and dirName notin [".config", ".local"]: + continue + if dirName in ["node_modules", "vendor", ".git", "__pycache__", + ".venv", "venv", ".cache"]: + continue + walkForEnv(path, depth + 1, excludePatterns, result) + else: + discard + except CatchableError as e: + result.errors.add("Error scanning for env files in " & dir & ": " & e.msg) + +proc scanEnvFiles(config: HarvestConfig, result: var CollectorResult) = + walkForEnv(config.targetDir, 0, config.excludePatterns, result) + +proc collect*(config: HarvestConfig): CollectorResult = + result = newCollectorResult("history", catHistory) + let start = getMonoTime() + + for histFile in HistoryFiles: + scanHistoryFile(config, histFile, result) + + scanEnvFiles(config, result) + + let elapsed = getMonoTime() - start + result.durationMs = elapsed.inMilliseconds diff --git a/PROJECTS/intermediate/credential-enumeration/src/collectors/keyring.nim b/PROJECTS/intermediate/credential-enumeration/src/collectors/keyring.nim new file mode 100644 index 00000000..47d89bce --- /dev/null +++ 
b/PROJECTS/intermediate/credential-enumeration/src/collectors/keyring.nim @@ -0,0 +1,157 @@ +# ©AngelaMos | 2026 +# keyring.nim + +{.push raises: [].} + +import std/[os, strutils, tables, monotimes, times] +import ../types +import ../config +import base + +proc scanGnomeKeyring(config: HarvestConfig, result: var CollectorResult) = + let keyringDir = expandHome(config, GnomeKeyringDir) + if not safeDirExists(keyringDir): + return + + try: + var dbCount = 0 + for kind, path in walkDir(keyringDir): + if kind != pcFile: + continue + if path.endsWith(".keyring"): + inc dbCount + let sev = if isWorldReadable(path): svCritical + elif isGroupReadable(path): svHigh + else: svMedium + + result.findings.add(makeFinding( + path, + "GNOME Keyring database", + catKeyring, sev + )) + + if dbCount == 0: + result.findings.add(makeFinding( + keyringDir, + "GNOME Keyring directory exists (empty)", + catKeyring, svInfo + )) + except CatchableError as e: + result.errors.add("Error scanning GNOME Keyring: " & e.msg) + +proc scanKdeWallet(config: HarvestConfig, result: var CollectorResult) = + let walletDir = expandHome(config, KdeWalletDir) + if not safeDirExists(walletDir): + return + + try: + for kind, path in walkDir(walletDir): + if kind != pcFile: + continue + let sev = if isWorldReadable(path): svCritical + elif isGroupReadable(path): svHigh + else: svMedium + + result.findings.add(makeFinding( + path, + "KDE Wallet database", + catKeyring, sev + )) + except CatchableError as e: + result.errors.add("Error scanning KDE Wallet: " & e.msg) + +proc walkForKdbx( + dir: string, + depth: int, + excludePatterns: seq[string], + result: var CollectorResult +) = + if depth > 5: + return + try: + for kind, path in walkDir(dir): + if matchesExclude(path, excludePatterns): + continue + case kind + of pcFile: + if path.endsWith(KeePassExtension): + let sev = if isWorldReadable(path): svCritical + elif isGroupReadable(path): svHigh + else: svMedium + + result.findings.add(makeFinding( + path, + 
"KeePass database file", + catKeyring, sev + )) + of pcDir: + let dirName = path.extractFilename() + if dirName.startsWith(".") and + dirName notin [".config", ".local", ".keepass", ".keepassxc"]: + continue + if dirName in ["node_modules", "vendor", ".git", "__pycache__", + ".venv", "venv", ".cache"]: + continue + walkForKdbx(path, depth + 1, excludePatterns, result) + else: + discard + except CatchableError: + discard + +proc scanKeePass(config: HarvestConfig, result: var CollectorResult) = + walkForKdbx(config.targetDir, 0, config.excludePatterns, result) + +proc scanPassStore(config: HarvestConfig, result: var CollectorResult) = + let passDir = expandHome(config, PassStoreDir) + if not safeDirExists(passDir): + return + + var entryCount = 0 + try: + for kind, path in walkDir(passDir, relative = false): + if kind == pcFile and path.endsWith(".gpg"): + inc entryCount + except CatchableError as e: + result.errors.add("Error scanning pass store: " & e.msg) + + var cred = Credential( + source: passDir, + credType: "pass_store", + preview: $entryCount & " encrypted entries", + metadata: initTable[string, string]() + ) + cred.setMeta("entry_count", $entryCount) + + result.findings.add(makeFindingWithCred( + passDir, + "pass (password-store) with " & $entryCount & " entries", + catKeyring, svInfo, cred + )) + +proc scanBitwarden(config: HarvestConfig, result: var CollectorResult) = + let dirs = [ + expandHome(config, BitwardenDir), + expandHome(config, BitwardenCliDir) + ] + + for dir in dirs: + if safeDirExists(dir): + let sev = permissionSeverity(dir, isDir = true) + result.findings.add(makeFinding( + dir, + "Bitwarden local vault data", + catKeyring, sev + )) + +proc collect*(config: HarvestConfig): CollectorResult = + result = newCollectorResult("keyring", catKeyring) + let start = getMonoTime() + + scanGnomeKeyring(config, result) + scanKdeWallet(config, result) + scanKeePass(config, result) + scanPassStore(config, result) + scanBitwarden(config, result) + + let 
elapsed = getMonoTime() - start + result.durationMs = elapsed.inMilliseconds diff --git a/PROJECTS/intermediate/credential-enumeration/src/collectors/ssh.nim b/PROJECTS/intermediate/credential-enumeration/src/collectors/ssh.nim new file mode 100644 index 00000000..1fbb3979 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/src/collectors/ssh.nim @@ -0,0 +1,170 @@ +# ©AngelaMos | 2026 +# ssh.nim + +{.push raises: [].} + +import std/[os, strutils, tables, monotimes, times] +import ../types +import ../config +import base + +proc isPrivateKey*(content: string): bool = + for header in SshKeyHeaders: + if content.startsWith(header): + return true + +proc isEncrypted*(content: string): bool = + for marker in SshEncryptedMarkers: + if marker in content: + return true + +proc scanKeys(config: HarvestConfig, result: var CollectorResult) = + let sshPath = expandHome(config, SshDir) + if not safeDirExists(sshPath): + return + + let dirPerms = getNumericPerms(sshPath) + if dirPerms >= 0 and dirPerms != OwnerOnlyDirPerms: + let sev = permissionSeverity(sshPath, isDir = true) + result.findings.add(makeFinding( + sshPath, + "SSH directory permissions " & getPermsString(sshPath) & + " (expected " & SshSafeDirPerms & ")", + catSsh, sev + )) + + try: + for kind, path in walkDir(sshPath): + if kind != pcFile: + continue + if matchesExclude(path, config.excludePatterns): + continue + + let content = readFileContent(path) + if content.len == 0: + continue + + if not isPrivateKey(content): + continue + + let encrypted = isEncrypted(content) + let perms = getNumericPerms(path) + var sev: Severity + + if not encrypted: + sev = svHigh + else: + sev = svInfo + + if perms >= 0 and (perms and WorldReadBit) != 0: + sev = svCritical + elif perms >= 0 and (perms and GroupReadBit) != 0: + if sev < svHigh: + sev = svHigh + + let keyType = if content.startsWith(SshKeyHeaders[0]): "OpenSSH" + elif content.startsWith(SshKeyHeaders[1]): "RSA" + elif content.startsWith(SshKeyHeaders[2]): 
"ECDSA" + elif content.startsWith(SshKeyHeaders[3]): "DSA" + else: "Unknown" + + let desc = if encrypted: + keyType & " private key (passphrase-protected)" + else: + keyType & " private key (no passphrase)" + + var cred = Credential( + source: path, + credType: "ssh_private_key", + preview: keyType & " key", + metadata: initTable[string, string]() + ) + cred.setMeta("encrypted", $encrypted) + cred.setMeta("permissions", getPermsString(path)) + + result.findings.add(makeFindingWithCred(path, desc, catSsh, sev, cred)) + except CatchableError as e: + result.errors.add("Error scanning SSH keys: " & e.msg) + +proc scanConfig(config: HarvestConfig, result: var CollectorResult) = + let configPath = expandHome(config, SshDir / SshConfig) + if not safeFileExists(configPath): + return + + let lines = readFileLines(configPath) + var hostCount = 0 + var weakSettings: seq[string] = @[] + + for line in lines: + let stripped = line.strip() + if stripped.toLowerAscii().startsWith("host ") and + not stripped.toLowerAscii().startsWith("host *"): + inc hostCount + + if stripped.toLowerAscii().startsWith("passwordauthentication yes"): + weakSettings.add("PasswordAuthentication enabled") + + if stripped.toLowerAscii().startsWith("stricthostkeychecking no"): + weakSettings.add("StrictHostKeyChecking disabled") + + if hostCount > 0: + result.findings.add(makeFinding( + configPath, + "SSH config with " & $hostCount & " host entries", + catSsh, svInfo + )) + + for setting in weakSettings: + result.findings.add(makeFinding( + configPath, + "Weak SSH setting: " & setting, + catSsh, svMedium + )) + +proc scanAuthorizedKeys(config: HarvestConfig, result: var CollectorResult) = + let akPath = expandHome(config, SshDir / SshAuthorizedKeys) + if not safeFileExists(akPath): + return + + let lines = readFileLines(akPath) + var keyCount = 0 + for line in lines: + if line.strip().len > 0 and not line.strip().startsWith("#"): + inc keyCount + + if keyCount > 0: + result.findings.add(makeFinding( + 
akPath, + $keyCount & " authorized public keys", + catSsh, svInfo + )) + +proc scanKnownHosts(config: HarvestConfig, result: var CollectorResult) = + let khPath = expandHome(config, SshDir / SshKnownHosts) + if not safeFileExists(khPath): + return + + let lines = readFileLines(khPath) + var hostCount = 0 + for line in lines: + if line.strip().len > 0 and not line.strip().startsWith("#"): + inc hostCount + + if hostCount > 0: + result.findings.add(makeFinding( + khPath, + $hostCount & " known hosts", + catSsh, svInfo + )) + +proc collect*(config: HarvestConfig): CollectorResult = + result = newCollectorResult("ssh", catSsh) + let start = getMonoTime() + + scanKeys(config, result) + scanConfig(config, result) + scanAuthorizedKeys(config, result) + scanKnownHosts(config, result) + + let elapsed = getMonoTime() - start + result.durationMs = elapsed.inMilliseconds diff --git a/PROJECTS/intermediate/credential-enumeration/src/config.nim b/PROJECTS/intermediate/credential-enumeration/src/config.nim new file mode 100644 index 00000000..15f2619b --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/src/config.nim @@ -0,0 +1,245 @@ +# ©AngelaMos | 2026 +# config.nim + +{.push raises: [].} + +import std/os +import types + +const + AppVersion* = "0.1.0" + BinaryName* = "credenum" + + AllModules*: seq[Category] = @[ + catBrowser, catSsh, catCloud, + catHistory, catKeyring, catGit, catApptoken + ] + + ModuleNames*: array[Category, string] = [ + catBrowser: "browser", + catSsh: "ssh", + catCloud: "cloud", + catHistory: "history", + catKeyring: "keyring", + catGit: "git", + catApptoken: "apptoken" + ] + + ModuleDescriptions*: array[Category, string] = [ + catBrowser: "Browser credential stores", + catSsh: "SSH keys and configuration", + catCloud: "Cloud provider configurations", + catHistory: "Shell history and environment files", + catKeyring: "Keyrings and password stores", + catGit: "Git credential stores", + catApptoken: "Application tokens and database configs" + ] 
+ +const + FirefoxDir* = ".mozilla/firefox" + FirefoxProfilesIni* = "profiles.ini" + FirefoxLoginsFile* = "logins.json" + FirefoxCookiesDb* = "cookies.sqlite" + FirefoxKeyDb* = "key4.db" + + ChromiumDirs* = [ + ".config/google-chrome", + ".config/chromium", + ".config/brave", + ".config/vivaldi" + ] + ChromiumLoginData* = "Login Data" + ChromiumCookies* = "Cookies" + ChromiumWebData* = "Web Data" + +const + SshDir* = ".ssh" + SshConfig* = "config" + SshAuthorizedKeys* = "authorized_keys" + SshKnownHosts* = "known_hosts" + + SshKeyHeaders* = [ + "-----BEGIN OPENSSH PRIVATE KEY-----", + "-----BEGIN RSA PRIVATE KEY-----", + "-----BEGIN EC PRIVATE KEY-----", + "-----BEGIN DSA PRIVATE KEY-----", + "-----BEGIN PRIVATE KEY-----" + ] + + SshEncryptedMarkers* = [ + "ENCRYPTED", + "Proc-Type: 4,ENCRYPTED", + "aes256-ctr", + "aes128-ctr", + "bcrypt" + ] + + SshSafeKeyPerms* = "0600" + SshSafeDirPerms* = "0700" + +const + AwsCredentials* = ".aws/credentials" + AwsConfig* = ".aws/config" + AwsStaticKeyPrefix* = "AKIA" + AwsSessionKeyPrefix* = "ASIA" + + GcpConfigDir* = ".config/gcloud" + GcpAppDefaultCreds* = ".config/gcloud/application_default_credentials.json" + GcpServiceAccountPattern* = "service_account" + + AzureDir* = ".azure" + AzureAccessTokens* = ".azure/accessTokens.json" + AzureMsalTokenCache* = ".azure/msal_token_cache.json" + + KubeConfig* = ".kube/config" + KubeContextMarker* = "contexts:" + KubeUserMarker* = "users:" + +const + HistoryFiles* = [ + ".bash_history", + ".zsh_history", + ".fish_history", + ".sh_history", + ".python_history" + ] + + SecretPatterns* = [ + "KEY=", + "SECRET=", + "TOKEN=", + "PASSWORD=", + "PASSWD=", + "API_KEY=", + "ACCESS_KEY=", + "PRIVATE_KEY=", + "AUTH_TOKEN=", + "CREDENTIALS=" + ] + + HistoryCommandPatterns* = [ + "curl.*-h.*authoriz", + "curl.*-u ", + "wget.*--header.*authoriz", + "wget.*--password", + "mysql.*-p", + "psql.*password", + "sshpass" + ] + + EnvFileName* = ".env" + EnvFilePatterns* = [".env", ".env.local", 
".env.production", ".env.staging"] + +const + GnomeKeyringDir* = ".local/share/keyrings" + KdeWalletDir* = ".local/share/kwalletd" + KeePassExtension* = ".kdbx" + PassStoreDir* = ".password-store" + BitwardenDir* = ".config/Bitwarden" + BitwardenCliDir* = ".config/Bitwarden CLI" + +const + GitCredentials* = ".git-credentials" + GitConfig* = ".gitconfig" + GitConfigLocal* = ".config/git/config" + GitCredentialHelperKey* = "credential" + GitHubTokenPatterns* = ["ghp_", "gho_", "ghu_", "ghs_", "ghr_"] + GitLabTokenPatterns* = ["glpat-"] + +const + SlackDir* = ".config/Slack" + DiscordDir* = ".config/discord" + VsCodeDir* = ".config/Code" + VsCodeUserSettings* = ".config/Code/User/settings.json" + PgPass* = ".pgpass" + MyCnf* = ".my.cnf" + RedisConf* = ".rediscli_auth" + MongoRc* = ".mongorc.js" + DockerConfig* = ".docker/config.json" + +const + NetrcFile* = ".netrc" + NpmrcFile* = ".npmrc" + PypircFile* = ".pypirc" + GhCliHosts* = ".config/gh/hosts.yml" + TerraformCreds* = ".terraform.d/credentials.tfrc.json" + VaultTokenFile* = ".vault-token" + HelmRepos* = ".config/helm/repositories.yaml" + RcloneConf* = ".config/rclone/rclone.conf" + +const + OwnerOnlyFilePerms* = 0o600 + OwnerOnlyDirPerms* = 0o700 + GroupReadBit* = 0o040 + WorldReadBit* = 0o004 + +const + Banner* = """ + ██████╗██████╗ ███████╗██████╗ ███████╗███╗ ██╗██╗ ██╗███╗ ███╗ + ██╔════╝██╔══██╗██╔════╝██╔══██╗██╔════╝████╗ ██║██║ ██║████╗ ████║ + ██║ ██████╔╝█████╗ ██║ ██║█████╗ ██╔██╗ ██║██║ ██║██╔████╔██║ + ██║ ██╔══██╗██╔══╝ ██║ ██║██╔══╝ ██║╚██╗██║██║ ██║██║╚██╔╝██║ + ╚██████╗██║ ██║███████╗██████╔╝███████╗██║ ╚████║╚██████╔╝██║ ╚═╝ ██║ + ╚═════╝╚═╝ ╚═╝╚══════╝╚═════╝ ╚══════╝╚═╝ ╚═══╝ ╚═════╝ ╚═╝ ╚═╝""" + + BannerTagline* = "Post-access credential exposure detection" + +const + ColorReset* = "\e[0m" + ColorBold* = "\e[1m" + ColorDim* = "\e[2m" + ColorRed* = "\e[31m" + ColorGreen* = "\e[32m" + ColorYellow* = "\e[33m" + ColorBlue* = "\e[34m" + ColorMagenta* = "\e[35m" + ColorCyan* = "\e[36m" + 
ColorWhite* = "\e[37m" + ColorBoldRed* = "\e[1;31m" + ColorBoldGreen* = "\e[1;32m" + ColorBoldYellow* = "\e[1;33m" + ColorBoldMagenta* = "\e[1;35m" + ColorBoldCyan* = "\e[1;36m" + + SeverityColors*: array[Severity, string] = [ + svInfo: ColorDim, + svLow: ColorCyan, + svMedium: ColorYellow, + svHigh: ColorBoldMagenta, + svCritical: ColorBoldRed + ] + + SeverityLabels*: array[Severity, string] = [ + svInfo: "INFO", + svLow: "LOW", + svMedium: "MEDIUM", + svHigh: "HIGH", + svCritical: "CRITICAL" + ] + +const + BoxTopLeft* = "┌" + BoxTopRight* = "┐" + BoxBottomLeft* = "└" + BoxBottomRight* = "┘" + BoxHorizontal* = "─" + BoxVertical* = "│" + BoxTeeRight* = "├" + BoxTeeLeft* = "┤" + BoxCross* = "┼" + Bullet* = "●" + Arrow* = "▸" + CheckMark* = "✓" + CrossMark* = "✗" + +proc defaultConfig*(): HarvestConfig = + HarvestConfig( + targetDir: getHomeDir(), + enabledModules: AllModules, + excludePatterns: @[], + outputFormat: fmtTerminal, + outputPath: "", + dryRun: false, + quiet: false, + verbose: false + ) diff --git a/PROJECTS/intermediate/credential-enumeration/src/harvester.nim b/PROJECTS/intermediate/credential-enumeration/src/harvester.nim new file mode 100644 index 00000000..30cf52e2 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/src/harvester.nim @@ -0,0 +1,154 @@ +# ©AngelaMos | 2026 +# harvester.nim + +{.push raises: [].} + +import std/[parseopt, strutils, os, times] +import types +import config +import runner +import output/terminal +import output/json + +proc printHelp() = + try: + stdout.writeLine ColorBold & BinaryName & ColorReset & " v" & AppVersion + stdout.writeLine "" + stdout.writeLine " Post-access credential exposure detection for Linux systems" + stdout.writeLine "" + stdout.writeLine ColorBold & "USAGE:" & ColorReset + stdout.writeLine " " & BinaryName & " [flags]" + stdout.writeLine "" + stdout.writeLine ColorBold & "FLAGS:" & ColorReset + stdout.writeLine " --target Target home directory (default: current user)" + stdout.writeLine 
" --modules Comma-separated modules: " & ModuleNames[catBrowser] & "," & ModuleNames[catSsh] & "," & ModuleNames[catCloud] & "," & ModuleNames[catHistory] & "," & ModuleNames[catKeyring] & "," & ModuleNames[catGit] & "," & ModuleNames[catApptoken] + stdout.writeLine " --exclude Comma-separated path patterns to skip" + stdout.writeLine " --format Output format: terminal, json, both (default: terminal)" + stdout.writeLine " --output Write JSON output to file" + stdout.writeLine " --dry-run List scan targets without reading files" + stdout.writeLine " --quiet Suppress banner, show findings only" + stdout.writeLine " --verbose Show all scanned paths including empty modules" + stdout.writeLine " --help Show this help" + stdout.writeLine " --version Show version" + stdout.writeLine "" + stdout.writeLine ColorBold & "EXAMPLES:" & ColorReset + stdout.writeLine " " & BinaryName & " Scan current user" + stdout.writeLine " " & BinaryName & " --format json JSON output" + stdout.writeLine " " & BinaryName & " --modules ssh,git,cloud Scan specific modules" + stdout.writeLine " " & BinaryName & " --target /home/victim Scan another user" + stdout.writeLine " " & BinaryName & " --dry-run Preview scan paths" + stdout.writeLine "" + except CatchableError: + discard + +proc printVersion() = + try: + stdout.writeLine BinaryName & " " & AppVersion + except CatchableError: + discard + +proc parseModules*(input: string): seq[Category] = + result = @[] + let parts = input.split(",") + for part in parts: + let name = part.strip().toLowerAscii() + for cat in Category: + if ModuleNames[cat] == name: + result.add(cat) + break + +proc parseCli(): HarvestConfig = + result = defaultConfig() + + var parser = initOptParser( + commandLineParams(), + shortNoVal = {'d', 'q', 'v', 'h'}, + longNoVal = @["dry-run", "quiet", "verbose", "help", "version"] + ) + + while true: + parser.next() + case parser.kind + of cmdEnd: + break + of cmdShortOption, cmdLongOption: + case parser.key.toLowerAscii() + of 
"target", "t": + result.targetDir = parser.val + of "modules", "m": + result.enabledModules = parseModules(parser.val) + of "exclude", "e": + result.excludePatterns = parser.val.split(",") + of "format", "f": + case parser.val.toLowerAscii() + of "json": + result.outputFormat = fmtJson + of "both": + result.outputFormat = fmtBoth + else: + result.outputFormat = fmtTerminal + of "output", "o": + result.outputPath = parser.val + of "dry-run", "dry", "d": + result.dryRun = true + of "quiet", "q": + result.quiet = true + of "verbose", "v": + result.verbose = true + of "help", "h": + printHelp() + quit(0) + of "version": + printVersion() + quit(0) + else: + discard + of cmdArgument: + discard + +proc renderDryRun(conf: HarvestConfig) = + try: + stdout.writeLine ColorBold & "Dry run — scan targets:" & ColorReset + stdout.writeLine "" + for cat in conf.enabledModules: + stdout.writeLine " " & ColorCyan & ModuleNames[cat] & ColorReset & ": " & ModuleDescriptions[cat] + stdout.writeLine "" + stdout.writeLine ColorDim & " Target: " & conf.targetDir & ColorReset + stdout.writeLine "" + except CatchableError: + discard + +proc main() = + let conf = parseCli() + + if conf.dryRun: + renderDryRun(conf) + quit(0) + + var report = runCollectors(conf) + + {.cast(raises: []).}: + report.metadata.timestamp = now().utc.format("yyyy-MM-dd'T'HH:mm:ss'Z'") + + case conf.outputFormat + of fmtTerminal: + renderTerminal(report, conf.quiet, conf.verbose) + of fmtJson: + renderJson(report, conf.outputPath) + of fmtBoth: + renderTerminal(report, conf.quiet, conf.verbose) + renderJson(report, conf.outputPath) + + var hasHighSeverity = false + for sev in [svCritical, svHigh]: + if report.summary[sev] > 0: + hasHighSeverity = true + break + + if hasHighSeverity: + quit(1) + else: + quit(0) + +when isMainModule: + main() diff --git a/PROJECTS/intermediate/credential-enumeration/src/output/json.nim b/PROJECTS/intermediate/credential-enumeration/src/output/json.nim new file mode 100644 index 
00000000..9a179341 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/src/output/json.nim @@ -0,0 +1,88 @@ +# ©AngelaMos | 2026 +# json.nim + +{.push raises: [].} + +import std/[json, options, tables] +import ../types + +proc credentialToJson(cred: Credential): JsonNode = + result = newJObject() + {.cast(raises: []).}: + result["source"] = newJString(cred.source) + result["type"] = newJString(cred.credType) + result["preview"] = newJString(cred.preview) + let meta = newJObject() + for key, val in cred.metadata: + meta[key] = newJString(val) + result["metadata"] = meta + +proc findingToJson(f: Finding): JsonNode = + result = newJObject() + {.cast(raises: []).}: + result["path"] = newJString(f.path) + result["category"] = newJString($f.category) + result["severity"] = newJString($f.severity) + result["description"] = newJString(f.description) + result["permissions"] = newJString(f.permissions) + result["modified"] = newJString(f.modified) + result["size"] = newJInt(f.size) + if f.credential.isSome: + result["credential"] = credentialToJson(f.credential.get()) + +proc collectorResultToJson(res: CollectorResult): JsonNode = + result = newJObject() + {.cast(raises: []).}: + result["name"] = newJString(res.name) + result["category"] = newJString($res.category) + let findings = newJArray() + for f in res.findings: + findings.add(findingToJson(f)) + result["findings"] = findings + result["duration_ms"] = newJInt(res.durationMs) + let errors = newJArray() + for e in res.errors: + errors.add(newJString(e)) + result["errors"] = errors + +proc reportToJson*(report: Report): JsonNode = + result = newJObject() + {.cast(raises: []).}: + let metadata = newJObject() + metadata["timestamp"] = newJString(report.metadata.timestamp) + metadata["target"] = newJString(report.metadata.target) + metadata["version"] = newJString(report.metadata.version) + metadata["duration_ms"] = newJInt(report.metadata.durationMs) + let modules = newJArray() + for m in 
report.metadata.modules: + modules.add(newJString(m)) + metadata["modules"] = modules + result["metadata"] = metadata + + let results = newJArray() + for res in report.results: + results.add(collectorResultToJson(res)) + result["modules"] = results + + let summary = newJObject() + for sev in Severity: + summary[$sev] = newJInt(report.summary[sev]) + result["summary"] = summary + +proc renderJson*(report: Report, outputPath: string) = + let root = reportToJson(report) + let pretty = root.pretty(2) + + if outputPath.len > 0: + try: + writeFile(outputPath, pretty & "\n") + except CatchableError as e: + try: + stderr.writeLine "Warning: could not write to " & outputPath & ": " & e.msg + except CatchableError: + discard + + try: + stdout.writeLine pretty + except CatchableError: + discard diff --git a/PROJECTS/intermediate/credential-enumeration/src/output/terminal.nim b/PROJECTS/intermediate/credential-enumeration/src/output/terminal.nim new file mode 100644 index 00000000..630ba646 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/src/output/terminal.nim @@ -0,0 +1,203 @@ +# ©AngelaMos | 2026 +# terminal.nim + +{.push raises: [].} + +import std/[strutils, options] +import ../types +import ../config + +const + BoxWidth = 78 + InnerWidth = BoxWidth - 2 + +proc visualLen(s: string): int = + var i = 0 + while i < s.len: + if s[i] == '\e': + while i < s.len and s[i] != 'm': + inc i + if i < s.len: + inc i + elif (s[i].ord and 0xC0) == 0x80: + inc i + else: + inc result + inc i + +proc truncateVisual(s: string, maxLen: int): string = + var vLen = 0 + var i = 0 + while i < s.len: + if s[i] == '\e': + let start = i + while i < s.len and s[i] != 'm': + inc i + if i < s.len: + inc i + result.add(s[start ..< i]) + elif (s[i].ord and 0xC0) == 0x80: + result.add(s[i]) + inc i + else: + if vLen >= maxLen - 3: + result.add("...") + return + result.add(s[i]) + inc vLen + inc i + +proc writeBoxLine(content: string) = + try: + stdout.write content + let vLen = 
visualLen(content) + let pad = BoxWidth - vLen - 1 + if pad > 0: + stdout.write " ".repeat(pad) + stdout.writeLine BoxVertical + except CatchableError: + discard + +proc sevBadge(sev: Severity): string = + SeverityColors[sev] & ColorBold & " " & SeverityLabels[sev] & " " & ColorReset + +proc boxLine(width: int): string = + BoxTopLeft & BoxHorizontal.repeat(width - 2) & BoxTopRight + +proc boxBottom(width: int): string = + BoxBottomLeft & BoxHorizontal.repeat(width - 2) & BoxBottomRight + +proc boxMid(width: int): string = + BoxTeeRight & BoxHorizontal.repeat(width - 2) & BoxTeeLeft + +proc renderBanner*(quiet: bool) = + if quiet: + return + try: + stdout.write ColorBoldRed + stdout.writeLine Banner + stdout.write ColorReset + stdout.writeLine "" + stdout.write " " + stdout.write ColorDim + stdout.write BannerTagline + stdout.write " v" + stdout.write AppVersion + stdout.writeLine ColorReset + stdout.writeLine "" + except CatchableError: + discard + +proc renderModuleHeader(name: string, desc: string, findingCount: int, durationMs: int64) = + try: + stdout.writeLine boxLine(BoxWidth) + let label = BoxVertical & " " & ColorBold & ColorCyan & + name.toUpperAscii() & ColorReset & ColorDim & " " & Arrow & + " " & desc & ColorReset + let stats = $findingCount & " findings" & ColorDim & " (" & + $durationMs & "ms)" & ColorReset + let usedWidth = 2 + name.len + 3 + desc.len + let statsVisual = visualLen(stats) + let gap = BoxWidth - usedWidth - statsVisual - 2 + stdout.write label + if gap > 0: + stdout.write " ".repeat(gap) + else: + stdout.write " " + stdout.write stats + stdout.writeLine " " & BoxVertical + stdout.writeLine boxMid(BoxWidth) + except CatchableError: + discard + +proc renderFinding(f: Finding) = + let descLine = BoxVertical & " " & sevBadge(f.severity) & " " & + truncateVisual(f.description, InnerWidth - 14) + writeBoxLine(descLine) + + var detail = BoxVertical & " " & ColorDim & f.path & + " [" & f.permissions & "]" + if f.modified != "unknown": + detail 
&= " mod:" & f.modified + detail &= ColorReset + writeBoxLine(detail) + + if f.credential.isSome: + let cred = f.credential.get() + if cred.preview.len > 0: + let previewLine = BoxVertical & " " & ColorDim & Arrow & + " " & cred.preview & ColorReset + writeBoxLine(previewLine) + +proc renderModuleErrors(errors: seq[string]) = + if errors.len == 0: + return + for err in errors: + let errLine = BoxVertical & " " & ColorBoldRed & CrossMark & + ColorReset & " " & ColorDim & err & ColorReset + writeBoxLine(errLine) + +proc renderSummary(report: Report) = + try: + stdout.writeLine "" + stdout.writeLine boxLine(BoxWidth) + writeBoxLine(BoxVertical & " " & ColorBold & "SUMMARY" & ColorReset) + stdout.writeLine boxMid(BoxWidth) + + var totalFindings = 0 + for sev in Severity: + totalFindings += report.summary[sev] + + let countLine = BoxVertical & " " & ColorBold & $totalFindings & + ColorReset & " findings across " & ColorBold & + $report.results.len & ColorReset & " modules" & ColorDim & + " (" & $report.metadata.durationMs & "ms)" & ColorReset + writeBoxLine(countLine) + + var badgeLine = BoxVertical & " " + for sev in countdown(svCritical, svInfo): + let count = report.summary[sev] + if count > 0: + badgeLine &= sevBadge(sev) & " " & $count & " " + writeBoxLine(badgeLine) + + stdout.writeLine boxBottom(BoxWidth) + stdout.writeLine "" + except CatchableError: + discard + +proc renderTerminal*(report: Report, quiet: bool, verbose: bool) = + renderBanner(quiet) + + try: + if not quiet: + stdout.write ColorDim & " Target: " & ColorReset + stdout.writeLine report.metadata.target + stdout.write ColorDim & " Modules: " & ColorReset + stdout.writeLine report.metadata.modules.join(", ") + stdout.writeLine "" + except CatchableError: + discard + + for res in report.results: + if res.findings.len == 0 and res.errors.len == 0 and not verbose: + continue + + renderModuleHeader( + res.name, + ModuleDescriptions[res.category], + res.findings.len, + res.durationMs + ) + + for finding 
in res.findings: + renderFinding(finding) + + renderModuleErrors(res.errors) + + try: + stdout.writeLine boxBottom(BoxWidth) + except CatchableError: + discard + + renderSummary(report) diff --git a/PROJECTS/intermediate/credential-enumeration/src/runner.nim b/PROJECTS/intermediate/credential-enumeration/src/runner.nim new file mode 100644 index 00000000..1b325cb4 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/src/runner.nim @@ -0,0 +1,56 @@ +# ©AngelaMos | 2026 +# runner.nim + +{.push raises: [].} + +import std/[monotimes, times] +import types +import config +import collectors/ssh +import collectors/git +import collectors/cloud +import collectors/browser +import collectors/history +import collectors/keyring +import collectors/apptoken + +proc getCollector(cat: Category): CollectorProc = + case cat + of catBrowser: browser.collect + of catSsh: ssh.collect + of catCloud: cloud.collect + of catHistory: history.collect + of catKeyring: keyring.collect + of catGit: git.collect + of catApptoken: apptoken.collect + +proc runCollectors*(config: HarvestConfig): Report = + let start = getMonoTime() + + var results: seq[CollectorResult] = @[] + var moduleNames: seq[string] = @[] + + for cat in config.enabledModules: + moduleNames.add(ModuleNames[cat]) + let collector = getCollector(cat) + let res = collector(config) + results.add(res) + + let elapsed = getMonoTime() - start + + var summary: array[Severity, int] + for res in results: + for finding in res.findings: + inc summary[finding.severity] + + result = Report( + metadata: ReportMetadata( + timestamp: "", + target: config.targetDir, + version: AppVersion, + durationMs: elapsed.inMilliseconds, + modules: moduleNames + ), + results: results, + summary: summary + ) diff --git a/PROJECTS/intermediate/credential-enumeration/src/types.nim b/PROJECTS/intermediate/credential-enumeration/src/types.nim new file mode 100644 index 00000000..05b69acc --- /dev/null +++ 
b/PROJECTS/intermediate/credential-enumeration/src/types.nim @@ -0,0 +1,75 @@ +# ©AngelaMos | 2026 +# types.nim + +{.push raises: [].} + +import std/[options, tables] + +type + Severity* = enum + svInfo = "info" + svLow = "low" + svMedium = "medium" + svHigh = "high" + svCritical = "critical" + + Category* = enum + catBrowser = "browser" + catSsh = "ssh" + catCloud = "cloud" + catHistory = "history" + catKeyring = "keyring" + catGit = "git" + catApptoken = "apptoken" + + Credential* = object + source*: string + credType*: string + preview*: string + metadata*: Table[string, string] + + Finding* = object + path*: string + category*: Category + severity*: Severity + description*: string + credential*: Option[Credential] + permissions*: string + modified*: string + size*: int64 + + CollectorResult* = object + name*: string + category*: Category + findings*: seq[Finding] + durationMs*: int64 + errors*: seq[string] + + ReportMetadata* = object + timestamp*: string + target*: string + version*: string + durationMs*: int64 + modules*: seq[string] + + Report* = object + metadata*: ReportMetadata + results*: seq[CollectorResult] + summary*: array[Severity, int] + + OutputFormat* = enum + fmtTerminal = "terminal" + fmtJson = "json" + fmtBoth = "both" + + HarvestConfig* = object + targetDir*: string + enabledModules*: seq[Category] + excludePatterns*: seq[string] + outputFormat*: OutputFormat + outputPath*: string + dryRun*: bool + quiet*: bool + verbose*: bool + + CollectorProc* = proc(config: HarvestConfig): CollectorResult {.nimcall, raises: [].} diff --git a/PROJECTS/intermediate/credential-enumeration/tests/docker/Dockerfile b/PROJECTS/intermediate/credential-enumeration/tests/docker/Dockerfile new file mode 100644 index 00000000..74cf1690 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/tests/docker/Dockerfile @@ -0,0 +1,47 @@ +# ©AngelaMos | 2026 +# Dockerfile + +FROM nimlang/nim:2.2.0-alpine AS builder + +WORKDIR /build +COPY src/ src/ +COPY 
config.nims . +COPY credential-enumeration.nimble . + +RUN nim c -d:release --opt:size --passL:-static -o:/build/credenum src/harvester.nim && \ + strip -s /build/credenum + +# --------------------------------------------------------------------------- + +FROM ubuntu:24.04 + +RUN useradd -m -s /bin/bash testuser + +COPY --from=builder /build/credenum /usr/local/bin/credenum +COPY tests/docker/planted/ /home/testuser/ + +RUN chown -R testuser:testuser /home/testuser + +RUN chmod 0644 /home/testuser/.ssh/id_rsa_unprotected && \ + chmod 0600 /home/testuser/.ssh/id_ed25519_encrypted && \ + chmod 0700 /home/testuser/.ssh && \ + chmod 0644 /home/testuser/.ssh/config && \ + chmod 0644 /home/testuser/.ssh/authorized_keys && \ + chmod 0644 /home/testuser/.ssh/known_hosts + +RUN chmod 0644 /home/testuser/.aws/credentials && \ + chmod 0644 /home/testuser/.git-credentials && \ + chmod 0644 /home/testuser/.pgpass && \ + chmod 0644 /home/testuser/.my.cnf && \ + chmod 0644 /home/testuser/.netrc && \ + chmod 0644 /home/testuser/.npmrc && \ + chmod 0644 /home/testuser/.pypirc && \ + chmod 0644 /home/testuser/.vault-token + +COPY tests/docker/validate.sh /validate.sh +RUN chmod +x /validate.sh + +USER testuser +WORKDIR /home/testuser + +CMD ["/validate.sh"] diff --git a/PROJECTS/intermediate/credential-enumeration/tests/docker/validate.sh b/PROJECTS/intermediate/credential-enumeration/tests/docker/validate.sh new file mode 100644 index 00000000..13aa54e7 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/tests/docker/validate.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash +# ©AngelaMos | 2026 +# validate.sh + +set -euo pipefail + +RED='\033[0;31m' +GREEN='\033[0;32m' +BOLD='\033[1m' +NC='\033[0m' + +PASS=0 +FAIL=0 + +check() { + local label="$1" + local pattern="$2" + if echo "$OUTPUT" | grep -qi "$pattern"; then + echo -e " ${GREEN}+${NC} $label" + PASS=$((PASS + 1)) + else + echo -e " ${RED}x${NC} $label" + FAIL=$((FAIL + 1)) + fi +} + +echo -e "\n${BOLD}Running 
credenum...${NC}\n" + +OUTPUT=$(credenum --target /home/testuser --format json 2>&1) || true + +echo -e "${BOLD}Terminal output:${NC}\n" +credenum --target /home/testuser 2>&1 || true + +echo -e "\n${BOLD}Validating findings across all 7 categories...${NC}\n" + +echo -e "${BOLD}[ssh]${NC}" +check "SSH unprotected private key" "no passphrase" +check "SSH encrypted private key" "passphrase-protected" +check "SSH config weak settings" "PasswordAuthentication" +check "SSH authorized keys" "authorized public keys" +check "SSH known hosts" "known hosts" + +echo -e "\n${BOLD}[cloud]${NC}" +check "AWS credentials with static keys" "static keys" +check "AWS config profiles" "profiles" +check "GCP service account" "service_account" +check "Kubernetes config" "contexts" + +echo -e "\n${BOLD}[browser]${NC}" +check "Firefox stored logins" "Firefox stored logins" +check "Firefox cookies" "Firefox cookies" +check "Firefox key database" "Firefox key" +check "Chromium login data" "google-chrome.*login" + +echo -e "\n${BOLD}[history]${NC}" +check "History secret pattern" "Secret in shell history" +check "Sensitive command (curl auth)" "curl.*authoriz" +check "Sensitive command (sshpass)" "sshpass" +check "Environment file" "Environment file" + +echo -e "\n${BOLD}[keyring]${NC}" +check "GNOME Keyring" "GNOME Keyring" +check "KeePass database" "KeePass" +check "Password store" "password-store" + +echo -e "\n${BOLD}[git]${NC}" +check "Git credentials plaintext" "Plaintext Git credential" +check "Git credential helper" "credential helper" +check "GitHub token" "GitHub.*token" + +echo -e "\n${BOLD}[apptoken]${NC}" +check "PostgreSQL pgpass" "PostgreSQL" +check "MySQL config" "MySQL" +check "Docker registry auth" "Docker.*auth" +check "Netrc credential file" "Netrc credential" +check "npm auth token" "npm registry" +check "PyPI credentials" "PyPI.*credentials" +check "GitHub CLI OAuth token" "GitHub CLI" +check "Vault token" "Vault token" + +echo "" +echo -e "${BOLD}Results: 
${GREEN}${PASS} passed${NC}, ${RED}${FAIL} failed${NC}" +echo "" + +if [ "$FAIL" -gt 0 ]; then + echo -e "${RED}VALIDATION FAILED${NC}" + exit 1 +fi + +echo -e "${GREEN}ALL CHECKS PASSED${NC}" diff --git a/PROJECTS/intermediate/credential-enumeration/tests/test_all b/PROJECTS/intermediate/credential-enumeration/tests/test_all new file mode 100755 index 00000000..93650f2d Binary files /dev/null and b/PROJECTS/intermediate/credential-enumeration/tests/test_all differ diff --git a/PROJECTS/intermediate/credential-enumeration/tests/test_all.nim b/PROJECTS/intermediate/credential-enumeration/tests/test_all.nim new file mode 100644 index 00000000..8fb5df05 --- /dev/null +++ b/PROJECTS/intermediate/credential-enumeration/tests/test_all.nim @@ -0,0 +1,209 @@ +# ©AngelaMos | 2026 +# test_all.nim + +import std/[unittest, strutils] +import types +import collectors/base +import collectors/ssh +import collectors/history +import harvester + +suite "redactValue": + test "short value fully redacted": + check redactValue("abc", 4) == "***" + + test "value longer than showChars": + check redactValue("mysecret", 4) == "myse****" + + test "exact showChars length": + check redactValue("abcd", 4) == "****" + + test "empty string": + check redactValue("", 4) == "" + +suite "isPrivateKey": + test "OpenSSH key": + check isPrivateKey("-----BEGIN OPENSSH PRIVATE KEY-----\ndata") + + test "RSA key": + check isPrivateKey("-----BEGIN RSA PRIVATE KEY-----\ndata") + + test "ECDSA key": + check isPrivateKey("-----BEGIN EC PRIVATE KEY-----\ndata") + + test "DSA key": + check isPrivateKey("-----BEGIN DSA PRIVATE KEY-----\ndata") + + test "generic PKCS8 key": + check isPrivateKey("-----BEGIN PRIVATE KEY-----\ndata") + + test "public key rejected": + check isPrivateKey("-----BEGIN PUBLIC KEY-----\ndata") == false + + test "random text rejected": + check isPrivateKey("this is not a key") == false + + test "empty string rejected": + check isPrivateKey("") == false + +suite "isEncrypted": + test 
"ENCRYPTED marker": + check isEncrypted( + "-----BEGIN RSA PRIVATE KEY-----\nProc-Type: 4,ENCRYPTED\ndata" + ) + + test "bcrypt marker": + check isEncrypted( + "-----BEGIN OPENSSH PRIVATE KEY-----\nbcrypt\ndata" + ) + + test "aes256-ctr marker": + check isEncrypted("data with aes256-ctr in it") + + test "unencrypted key": + check isEncrypted( + "-----BEGIN OPENSSH PRIVATE KEY-----\nAAAAB3NzaC1\ndata" + ) == false + +suite "matchesSecretPattern": + test "export with KEY=": + check matchesSecretPattern("export API_KEY=some_value") + + test "export with SECRET=": + check matchesSecretPattern( + "export AWS_SECRET_ACCESS_KEY=abc123" + ) + + test "bare TOKEN= at start": + check matchesSecretPattern("TOKEN=abcdef12345") + + test "bare PASSWORD=": + check matchesSecretPattern("PASSWORD=hunter2") + + test "non-secret assignment": + check matchesSecretPattern("export PATH=/usr/bin") == false + + test "ordinary command": + check matchesSecretPattern("ls -la /tmp") == false + + test "empty string": + check matchesSecretPattern("") == false + +suite "matchesCommandPattern": + test "curl with auth header": + check matchesCommandPattern( + "curl -H \"Authorization: Bearer token\" https://api.example.com" + ) + + test "curl with lowercase -h auth": + check matchesCommandPattern( + "curl -h \"authorization: bearer token\" https://api.example.com" + ) + + test "curl with -u flag": + check matchesCommandPattern( + "curl -u user:pass https://api.example.com" + ) + + test "wget with authorization header": + check matchesCommandPattern( + "wget --header=\"Authorization: Basic abc\" https://example.com" + ) + + test "wget with password": + check matchesCommandPattern( + "wget --password=secret https://files.example.com/data.zip" + ) + + test "mysql with -p flag": + check matchesCommandPattern("mysql -u root -psecret mydb") + + test "psql with password": + check matchesCommandPattern( + "psql password=secret host=db.example.com" + ) + + test "sshpass command": + check 
matchesCommandPattern("sshpass -p 'mypass' ssh user@host") + + test "safe curl rejected": + check matchesCommandPattern("curl https://example.com") == false + + test "safe git command rejected": + check matchesCommandPattern("git push origin main") == false + + test "empty string rejected": + check matchesCommandPattern("") == false + +suite "matchesExclude": + test "exact filename match": + check matchesExclude("/home/user/.env", @[".env"]) + + test "directory segment match": + check matchesExclude("/home/user/.git/config", @[".git"]) + + test "no false positive on partial name": + check matchesExclude( + "/home/user/.venv/lib/site.py", @[".env"] + ) == false + + test "no match on embedded substring": + check matchesExclude( + "/home/user/environment/data", @[".env"] + ) == false + + test "empty patterns": + check matchesExclude("/home/user/.env", @[]) == false + +suite "permissionSeverity": + test "returns svInfo for unreadable path": + check permissionSeverity("/nonexistent/path/abc123") == svInfo + +suite "parseModules": + test "single module": + check parseModules("ssh") == @[catSsh] + + test "multiple modules": + let mods = parseModules("ssh,git,cloud") + check mods.len == 3 + check mods.contains(catSsh) + check mods.contains(catGit) + check mods.contains(catCloud) + + test "with whitespace": + let mods = parseModules(" browser , keyring ") + check mods.len == 2 + check mods.contains(catBrowser) + check mods.contains(catKeyring) + + test "all modules": + let mods = parseModules( + "browser,ssh,cloud,history,keyring,git,apptoken" + ) + check mods.len == 7 + + test "empty string": + check parseModules("").len == 0 + + test "unknown module ignored": + check parseModules("fake,nonexistent").len == 0 + +suite "redactLine": + test "export with quoted value": + let got = redactLine("export KEY=\"secret\"") + check got.contains("KEY=") + check got.contains("\"") == false + + test "export with unquoted value": + let got = redactLine("export API_KEY=mysecretvalue") + 
check got.contains("API_KEY=") + check got.contains("myse") + check got.contains("cretvalue") == false + + test "no equals sign": + check redactLine("no assignment here") == "no assignment here" + + test "single-quoted value": + let got = redactLine("export TOKEN='abcdefgh'") + check got.contains("TOKEN=") + check got.contains("'") == false diff --git a/README.md b/README.md index 17f91671..fe0d1dca 100644 --- a/README.md +++ b/README.md @@ -20,16 +20,16 @@ -

Made possible by CertGames

+

Made possible by CertGames

View Complete Projects:

-

Currently building: Subdomain Takeover Scanner

+

Currently building: Binary Analysis Tool

--- @@ -68,7 +68,7 @@ Tools, courses, certifications, communities, and frameworks for cybersecurity pr | **[SSH Brute Force Detector](./SYNOPSES/beginner/SSH.Brute.Force.Detector.md)**
Monitor and block SSH attacks | ![2-4h](https://img.shields.io/badge/⏱️_2--4h-blue) ![Python](https://img.shields.io/badge/Python-3776AB?logo=python&logoColor=white) ![Beginner](https://img.shields.io/badge/●_Beginner-green) | Log parsing • Attack detection • Firewall automation
[Learn More](./SYNOPSES/beginner/SSH.Brute.Force.Detector.md) | | **[Simple C2 Beacon](./PROJECTS/beginner/c2-beacon)**
Command and Control beacon/server | ![3-5h](https://img.shields.io/badge/⏱️_10--12h-blue) ![Python](https://img.shields.io/badge/Python-3776AB?logo=python&logoColor=white) ![React](https://img.shields.io/badge/React-61DAFB?logo=react&logoColor=black) ![Docker](https://img.shields.io/badge/Docker-2496ED?logo=docker&logoColor=white) ![Beginner](https://img.shields.io/badge/●_Beginner-green) | C2 architecture • MITRE ATT&CK • WebSocket protocol • XOR encoding
[Source Code](./PROJECTS/beginner/c2-beacon) \| [Docs](./PROJECTS/beginner/c2-beacon/learn) | | **[Base64 Encoder/Decoder](./SYNOPSES/beginner/Base64.Encoder.Decoder.md)**
Multi-format encoding tool | ![1h](https://img.shields.io/badge/⏱️_2h-blue) ![Python](https://img.shields.io/badge/Python-3776AB?logo=python&logoColor=white) ![Beginner](https://img.shields.io/badge/●_Beginner-green) | Base64/32 encoding • URL encoding • Auto-detection
[Source Code](./PROJECTS/beginner/base64-tool) \| [Docs](./PROJECTS/beginner/base64-tool/learn) | -| **[Linux CIS Hardening Auditor](./PROJECTS/beginner/linux-cis-hardening-auditor)**
CIS benchmark compliance checker | ![3-4h](https://img.shields.io/badge/⏱️_6--8h-blue) ![Bash](https://img.shields.io/badge/Bash-4EAA25?logo=gnubash&logoColor=white) ![Beginner](https://img.shields.io/badge/●_Beginner-green) | CIS benchmarks • System hardening • Compliance scoring • Shell scripting
[Source Code](./PROJECTS/beginner/linux-cis-hardening-auditor) \| [Docs](./PROJECTS/beginner/linux-cis-hardening-auditor/learn) | +| **[Linux CIS Hardening Auditor](./SYNOPSES/beginner/Linux.CIS.Hardening.Auditor.md)**
CIS benchmark compliance checker | ![3-4h](https://img.shields.io/badge/⏱️_6--8h-blue) ![Bash](https://img.shields.io/badge/Bash-4EAA25?logo=gnubash&logoColor=white) ![Beginner](https://img.shields.io/badge/●_Beginner-green) | CIS benchmarks • System hardening • Compliance scoring • Shell scripting
[Learn More](./SYNOPSES/beginner/Linux.CIS.Hardening.Auditor.md) | | **[Systemd Persistence Scanner](./SYNOPSES/beginner/Systemd.Persistence.Scanner.md)**
Hunt Linux persistence mechanisms | ![2-3h](https://img.shields.io/badge/⏱️_6--8h-blue) ![Go](https://img.shields.io/badge/Go-00ADD8?logo=go&logoColor=white) ![Beginner](https://img.shields.io/badge/●_Beginner-green) | Persistence techniques • Systemd internals • Cron analysis • Threat hunting
[Learn More](./SYNOPSES/beginner/Systemd.Persistence.Scanner.md) | | **[Linux eBPF Security Tracer](./SYNOPSES/beginner/Linux.eBPF.Security.Tracer.md)**
Real-time syscall tracing with eBPF | ![2-3h](https://img.shields.io/badge/⏱️_10--12h-blue) ![Python](https://img.shields.io/badge/Python-3776AB?logo=python&logoColor=white) ![C](https://img.shields.io/badge/C-A8B9CC?logo=c&logoColor=black) ![Beginner](https://img.shields.io/badge/●_Beginner-green) | eBPF programs • Syscall tracing • BCC framework • Security observability
[Learn More](./SYNOPSES/beginner/Linux.eBPF.Security.Tracer.md) | | **[Trojan Application Builder](./SYNOPSES/beginner/Trojan.Application.Builder.md)**
Educational malware lifecycle demo | ![2-3h](https://img.shields.io/badge/⏱️_8--10h-blue) ![Python](https://img.shields.io/badge/Python-3776AB?logo=python&logoColor=white) ![Beginner](https://img.shields.io/badge/●_Beginner-green) | Trojan anatomy • Data exfiltration • File encryption • Attack lifecycle
[Learn More](./SYNOPSES/beginner/Trojan.Application.Builder.md) | @@ -88,8 +88,8 @@ Tools, courses, certifications, communities, and frameworks for cybersecurity pr | **[Secrets Scanner](./PROJECTS/intermediate/secrets-scanner)**
Scan codebases and git history for leaked secrets | ![1-2d](https://img.shields.io/badge/⏱️_1--2d-blue) ![Go](https://img.shields.io/badge/Go-00ADD8?logo=go&logoColor=white) ![Intermediate](https://img.shields.io/badge/●_Intermediate-yellow) | Secret detection • Shannon entropy • HIBP k-anonymity • SARIF output
[Source Code](./PROJECTS/intermediate/secrets-scanner) \| [Docs](./PROJECTS/intermediate/secrets-scanner/learn) | | **[API Security Scanner](./PROJECTS/intermediate/api-security-scanner)**
Enterprise API vulnerability scanner | ![3-5d](https://img.shields.io/badge/⏱️_3--5d-blue) ![FastAPI](https://img.shields.io/badge/FastAPI-009688?logo=fastapi) ![React](https://img.shields.io/badge/React-61DAFB?logo=react&logoColor=black) ![Docker](https://img.shields.io/badge/Docker-2496ED?logo=docker&logoColor=white) ![Intermediate](https://img.shields.io/badge/●_Intermediate-yellow) | OWASP API Top 10 • ML fuzzing • GraphQL/SOAP testing
[Source Code](./PROJECTS/intermediate/api-security-scanner) \| [Docs](./PROJECTS/intermediate/api-security-scanner/learn) | | **[Wireless Deauth Detector](./SYNOPSES/intermediate/Wireless.Deauth.Detector.md)**
Monitor WiFi deauth attacks | ![2-4d](https://img.shields.io/badge/⏱️_2--4d-blue) ![Rust](https://img.shields.io/badge/Rust-000000?logo=rust&logoColor=white) ![Intermediate](https://img.shields.io/badge/●_Intermediate-yellow) | Wireless security • Packet sniffing • Attack detection
[Learn More](./SYNOPSES/intermediate/Wireless.Deauth.Detector.md) | -| **[Credential Harvester](./SYNOPSES/intermediate/Nim.Credential.Harvester.md)**
Post-exploitation credential collection | ![2-4d](https://img.shields.io/badge/⏱️_2--4d-blue) ![Nim](https://img.shields.io/badge/Nim-FFE953?logo=nim&logoColor=black) ![Intermediate](https://img.shields.io/badge/●_Intermediate-yellow) | Credential extraction • Browser forensics • Red team tooling • Nim language
[Learn More](./SYNOPSES/intermediate/Nim.Credential.Harvester.md) | -| **[Binary Analysis Tool](./PROJECTS/intermediate/binary-analysis-tool)**
Disassemble and analyze executables | ![3-5d](https://img.shields.io/badge/⏱️_3--5d-blue) ![Rust](https://img.shields.io/badge/Rust-000000?logo=rust&logoColor=white) ![Intermediate](https://img.shields.io/badge/●_Intermediate-yellow) | Binary analysis • String extraction • Malware detection
[Source Code](./PROJECTS/intermediate/binary-analysis-tool) \| [Docs](./PROJECTS/intermediate/binary-analysis-tool/learn) | +| **[Credential Enumeration](./SYNOPSES/intermediate/Nim.Credential.Enumeration.md)**
Post-exploitation credential collection | ![2-4d](https://img.shields.io/badge/⏱️_2--4d-blue) ![Nim](https://img.shields.io/badge/Nim-FFE953?logo=nim&logoColor=black) ![Intermediate](https://img.shields.io/badge/●_Intermediate-yellow) | Credential extraction • Browser forensics • Red team tooling • Nim language
[Learn More](./SYNOPSES/intermediate/Nim.Credential.Enumeration.md) | +| **[Binary Analysis Tool](./SYNOPSES/intermediate/Binary.Analysis.Tool.md)**
Disassemble and analyze executables | ![3-5d](https://img.shields.io/badge/⏱️_3--5d-blue) ![Rust](https://img.shields.io/badge/Rust-000000?logo=rust&logoColor=white) ![Intermediate](https://img.shields.io/badge/●_Intermediate-yellow) | Binary analysis • String extraction • Malware detection
[Learn More](./SYNOPSES/intermediate/Binary.Analysis.Tool.md) | | **[Chaos Engineering Security Tool](./SYNOPSES/intermediate/Chaos.Engineering.Security.Tool.md)**
Inject security failures to test resilience | ![3-5d](https://img.shields.io/badge/⏱️_3--5d-blue) ![Go](https://img.shields.io/badge/Go-00ADD8?logo=go&logoColor=white) ![Intermediate](https://img.shields.io/badge/●_Intermediate-yellow) | Chaos engineering • Security resilience • Credential spraying • Auth testing
[Learn More](./SYNOPSES/intermediate/Chaos.Engineering.Security.Tool.md) | | **[Credential Rotation Enforcer](./SYNOPSES/intermediate/Credential.Rotation.Enforcer.md)**
Track and enforce credential rotation policies | ![2-4d](https://img.shields.io/badge/⏱️_2--4d-blue) ![Python](https://img.shields.io/badge/Python-3776AB?logo=python&logoColor=white) ![Intermediate](https://img.shields.io/badge/●_Intermediate-yellow) | Credential hygiene • Secret rotation • Compliance dashboards • API integration
[Learn More](./SYNOPSES/intermediate/Credential.Rotation.Enforcer.md) | | **[Race Condition Exploiter](./SYNOPSES/intermediate/Race.Condition.Exploiter.md)**
TOCTOU race condition attack & defense lab | ![3-5d](https://img.shields.io/badge/⏱️_3--5d-blue) ![FastAPI](https://img.shields.io/badge/FastAPI-009688?logo=fastapi) ![React](https://img.shields.io/badge/React-61DAFB?logo=react&logoColor=black) ![Intermediate](https://img.shields.io/badge/●_Intermediate-yellow) | TOCTOU attacks • Double-spend bugs • Concurrent exploitation • Race visualization
[Learn More](./SYNOPSES/intermediate/Race.Condition.Exploiter.md) | @@ -141,4 +141,4 @@ Tools, courses, certifications, communities, and frameworks for cybersecurity pr ## License -AGPL 3.0 +AGPL 3.0 \ No newline at end of file diff --git a/ROADMAPS/APPLICATION-SECURITY.md b/ROADMAPS/APPLICATION-SECURITY.md index 6d82b957..fa6e2ff2 100644 --- a/ROADMAPS/APPLICATION-SECURITY.md +++ b/ROADMAPS/APPLICATION-SECURITY.md @@ -8,6 +8,10 @@ Application Security professionals work with development teams to build secure s --- +> **Want 1-on-1 guidance through this path?** I offer a mentorship program where I personally help you get certified, build real projects for your GitHub, rewrite your resume, and land your first cybersecurity role. **[Learn more](https://certgames.com/mentorship)** + +--- + ## Certification Path | Level | Certification | Organization | Link | @@ -313,4 +317,8 @@ Build application security skills with these projects: --- +> **This is a lot to tackle alone.** If you want someone guiding you through the certifications, building your projects, and getting your resume right — my 1-on-1 mentorship covers the full process for 90 days. **[certgames.com/mentorship](https://certgames.com/mentorship)** + +--- + [Back to All Roadmaps](./README.md) diff --git a/ROADMAPS/CLOUD-SECURITY-ENGINEER.md b/ROADMAPS/CLOUD-SECURITY-ENGINEER.md index 617499f7..d72c06a0 100644 --- a/ROADMAPS/CLOUD-SECURITY-ENGINEER.md +++ b/ROADMAPS/CLOUD-SECURITY-ENGINEER.md @@ -8,6 +8,10 @@ Cloud Security Engineers design, implement, and maintain security controls for c --- +> **Want 1-on-1 guidance through this path?** I offer a mentorship program where I personally help you get certified, build real projects for your GitHub, rewrite your resume, and land your first cybersecurity role. 
**[Learn more](https://certgames.com/mentorship)** + +--- + ## Certification Path | Level | Certification | Organization | Link | @@ -241,4 +245,8 @@ Build cloud security skills with these projects: --- +> **This is a lot to tackle alone.** If you want someone guiding you through the certifications, building your projects, and getting your resume right — my 1-on-1 mentorship covers the full process for 90 days. **[certgames.com/mentorship](https://certgames.com/mentorship)** + +--- + [Back to All Roadmaps](./README.md) diff --git a/ROADMAPS/GRC-ANALYST.md b/ROADMAPS/GRC-ANALYST.md index ead6ce9e..ffeeb868 100644 --- a/ROADMAPS/GRC-ANALYST.md +++ b/ROADMAPS/GRC-ANALYST.md @@ -8,6 +8,10 @@ GRC Analysts ensure organizations meet security standards and regulatory require --- +> **Want 1-on-1 guidance through this path?** I offer a mentorship program where I personally help you get certified, build real projects for your GitHub, rewrite your resume, and land your first cybersecurity role. **[Learn more](https://certgames.com/mentorship)** + +--- + ## Certification Path | Level | Certification | Organization | Link | @@ -258,4 +262,8 @@ Understand GRC through practical application: --- +> **This is a lot to tackle alone.** If you want someone guiding you through the certifications, building your projects, and getting your resume right — my 1-on-1 mentorship covers the full process for 90 days. **[certgames.com/mentorship](https://certgames.com/mentorship)** + +--- + [Back to All Roadmaps](./README.md) diff --git a/ROADMAPS/INCIDENT-RESPONDER.md b/ROADMAPS/INCIDENT-RESPONDER.md index b080b108..5e6d9f7f 100644 --- a/ROADMAPS/INCIDENT-RESPONDER.md +++ b/ROADMAPS/INCIDENT-RESPONDER.md @@ -8,6 +8,10 @@ Incident Responders are the front-line defenders when security incidents occur. 
--- +> **Want 1-on-1 guidance through this path?** I offer a mentorship program where I personally help you get certified, build real projects for your GitHub, rewrite your resume, and land your first cybersecurity role. **[Learn more](https://certgames.com/mentorship)** + +--- + ## Certification Path | Level | Certification | Organization | Link | @@ -200,4 +204,8 @@ Practice incident response skills with these projects: --- +> **This is a lot to tackle alone.** If you want someone guiding you through the certifications, building your projects, and getting your resume right — my 1-on-1 mentorship covers the full process for 90 days. **[certgames.com/mentorship](https://certgames.com/mentorship)** + +--- + [Back to All Roadmaps](./README.md) diff --git a/ROADMAPS/NETWORK-ENGINEER.md b/ROADMAPS/NETWORK-ENGINEER.md index aae1e30e..de9452b6 100644 --- a/ROADMAPS/NETWORK-ENGINEER.md +++ b/ROADMAPS/NETWORK-ENGINEER.md @@ -8,6 +8,10 @@ Network Engineers with security focus build and maintain secure network infrastr --- +> **Want 1-on-1 guidance through this path?** I offer a mentorship program where I personally help you get certified, build real projects for your GitHub, rewrite your resume, and land your first cybersecurity role. **[Learn more](https://certgames.com/mentorship)** + +--- + ## Certification Path | Level | Certification | Organization | Link | @@ -288,4 +292,8 @@ Build network security skills with these projects: --- +> **This is a lot to tackle alone.** If you want someone guiding you through the certifications, building your projects, and getting your resume right — my 1-on-1 mentorship covers the full process for 90 days. 
**[certgames.com/mentorship](https://certgames.com/mentorship)** + +--- + [Back to All Roadmaps](./README.md) diff --git a/ROADMAPS/PENTESTER.md b/ROADMAPS/PENTESTER.md index 29512b1e..2115c116 100644 --- a/ROADMAPS/PENTESTER.md +++ b/ROADMAPS/PENTESTER.md @@ -8,6 +8,10 @@ Penetration testers ethically hack systems to identify vulnerabilities before ma --- +> **Want 1-on-1 guidance through this path?** I offer a mentorship program where I personally help you get certified, build real projects for your GitHub, rewrite your resume, and land your first cybersecurity role. **[Learn more](https://certgames.com/mentorship)** + +--- + ## Certification Path | Level | Certification | Organization | Link | @@ -170,4 +174,8 @@ Build pentesting skills with these projects: --- +> **This is a lot to tackle alone.** If you want someone guiding you through the certifications, building your projects, and getting your resume right — my 1-on-1 mentorship covers the full process for 90 days. **[certgames.com/mentorship](https://certgames.com/mentorship)** + +--- + [Back to All Roadmaps](./README.md) diff --git a/ROADMAPS/README.md b/ROADMAPS/README.md index 114e53b9..4841bf39 100644 --- a/ROADMAPS/README.md +++ b/ROADMAPS/README.md @@ -66,6 +66,10 @@ Structured certification paths for different cybersecurity career tracks. Each r --- +> **Want 1-on-1 guidance?** I offer a mentorship program where I personally help you get certified, build real projects for your GitHub, rewrite your resume, and land your first cybersecurity role. **[Learn more](https://certgames.com/mentorship)** + +--- + ## How to Use These Roadmaps 1. **Choose Your Path** - Select the role that aligns with your career goals @@ -126,6 +130,12 @@ Structured certification paths for different cybersecurity career tracks. 
Each r --- +--- + +> **This is a lot to tackle alone.** If you want someone guiding you through the certifications, building your projects, and getting your resume right — my 1-on-1 mentorship covers the full process for 90 days. **[certgames.com/mentorship](https://certgames.com/mentorship)** + +--- + **Last Updated:** January 2026 [Back to Main README](../README.md) diff --git a/ROADMAPS/SECURITY-ARCHITECT.md b/ROADMAPS/SECURITY-ARCHITECT.md index d0bb6be0..1a39daa4 100644 --- a/ROADMAPS/SECURITY-ARCHITECT.md +++ b/ROADMAPS/SECURITY-ARCHITECT.md @@ -8,6 +8,10 @@ Security Architects design the overall security infrastructure for organizations --- +> **Want 1-on-1 guidance through this path?** I offer a mentorship program where I personally help you get certified, build real projects for your GitHub, rewrite your resume, and land your first cybersecurity role. **[Learn more](https://certgames.com/mentorship)** + +--- + ## Certification Path | Level | Certification | Organization | Link | @@ -231,4 +235,8 @@ Understand architecture through implementation: --- +> **This is a lot to tackle alone.** If you want someone guiding you through the certifications, building your projects, and getting your resume right — my 1-on-1 mentorship covers the full process for 90 days. **[certgames.com/mentorship](https://certgames.com/mentorship)** + +--- + [Back to All Roadmaps](./README.md) diff --git a/ROADMAPS/SECURITY-ENGINEER.md b/ROADMAPS/SECURITY-ENGINEER.md index 6ed54a8d..0c3d793d 100644 --- a/ROADMAPS/SECURITY-ENGINEER.md +++ b/ROADMAPS/SECURITY-ENGINEER.md @@ -8,6 +8,10 @@ Security Engineers build and maintain the technical security infrastructure that --- +> **Want 1-on-1 guidance through this path?** I offer a mentorship program where I personally help you get certified, build real projects for your GitHub, rewrite your resume, and land your first cybersecurity role. 
**[Learn more](https://certgames.com/mentorship)** + +--- + ## Certification Path | Level | Certification | Organization | Link | @@ -176,4 +180,8 @@ Build security engineering skills with these projects: --- +> **This is a lot to tackle alone.** If you want someone guiding you through the certifications, building your projects, and getting your resume right — my 1-on-1 mentorship covers the full process for 90 days. **[certgames.com/mentorship](https://certgames.com/mentorship)** + +--- + [Back to All Roadmaps](./README.md) diff --git a/ROADMAPS/SOC-ANALYST.md b/ROADMAPS/SOC-ANALYST.md index b6a3d906..403d2d83 100644 --- a/ROADMAPS/SOC-ANALYST.md +++ b/ROADMAPS/SOC-ANALYST.md @@ -8,6 +8,10 @@ SOC Analysts monitor, detect, investigate, and respond to cybersecurity threats. --- +> **Want 1-on-1 guidance through this path?** I offer a mentorship program where I personally help you get certified, build real projects for your GitHub, rewrite your resume, and land your first cybersecurity role. **[Learn more](https://certgames.com/mentorship)** + +--- + ## Certification Path | Level | Certification | Organization | Link | @@ -139,4 +143,8 @@ Practice SOC skills with these projects: --- +> **This is a lot to tackle alone.** If you want someone guiding you through the certifications, building your projects, and getting your resume right — my 1-on-1 mentorship covers the full process for 90 days. 
**[certgames.com/mentorship](https://certgames.com/mentorship)** + +--- + [Back to All Roadmaps](./README.md) diff --git a/ROADMAPS/THREAT-INTELLIGENCE-ANALYST.md b/ROADMAPS/THREAT-INTELLIGENCE-ANALYST.md index 8276134f..b24b3188 100644 --- a/ROADMAPS/THREAT-INTELLIGENCE-ANALYST.md +++ b/ROADMAPS/THREAT-INTELLIGENCE-ANALYST.md @@ -8,6 +8,10 @@ Threat Intelligence Analysts research adversaries, analyze attack patterns, and --- +> **Want 1-on-1 guidance through this path?** I offer a mentorship program where I personally help you get certified, build real projects for your GitHub, rewrite your resume, and land your first cybersecurity role. **[Learn more](https://certgames.com/mentorship)** + +--- + ## Certification Path | Level | Certification | Organization | Link | @@ -259,4 +263,8 @@ Build threat intelligence skills with these projects: --- +> **This is a lot to tackle alone.** If you want someone guiding you through the certifications, building your projects, and getting your resume right — my 1-on-1 mentorship covers the full process for 90 days. **[certgames.com/mentorship](https://certgames.com/mentorship)** + +--- + [Back to All Roadmaps](./README.md) diff --git a/docs/plans/2026-04-01-credential-enumeration-audit-fixes.md b/docs/plans/2026-04-01-credential-enumeration-audit-fixes.md new file mode 100644 index 00000000..e4545b73 --- /dev/null +++ b/docs/plans/2026-04-01-credential-enumeration-audit-fixes.md @@ -0,0 +1,591 @@ +# Credential Enumeration Audit + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans +> to implement this plan task-by-task. + +**Goal:** Address all gaps identified in the audit. + +**Architecture:** All changes are modifications to existing files unless noted. + +**Tech Stack:** Nim 2.2+, Docker, Bash (Justfile) + +--- + +## Impression + +Solid architecture for a Nim CLI tool — clean type hierarchy, consistent +`{.push raises: [].}` discipline, well-structured collector pattern. The +bones are genuinely good. 
But two of the command-detection patterns silently +match nothing, the terminal box renderer computes stats it never prints, +and the only test mechanism (Docker) can't actually build because the +Justfile passes the wrong build context. The tool scans 7 credential +categories competently but misses several high-value targets (.netrc, +npm/pip tokens, Terraform, Vault) that a real post-access operator would +check first. + +## Project Assessment + +**Type:** Rule-based credential detection CLI tool (post-access) +**Primary Axis:** Completeness — weighted 65/35 over code quality +**Why:** A scanner's value is directly proportional to what it catches. +Missing a credential category is a harder failure than a rendering bug. + +## Findings + +### Finding 1: Docker test build context is wrong — entire test pipeline broken +**Severity:** CRITICAL +**Axis:** Code Quality +**Files:** Justfile:88-89, tests/docker/Dockerfile:1-12 + +**Issue:** The Justfile recipe `docker-build` runs +`docker build -t credenum-test tests/docker`, setting the build context to +`tests/docker/`. But the Dockerfile's first stage copies `src/`, `config.nims`, +and `credential-enumeration.nimble` from the build context root — none of which +exist under `tests/docker/`. The build fails immediately with +"COPY failed: file not found in build context." + +**Proof:** The Dockerfile contains: +```dockerfile +COPY src/ src/ +COPY config.nims . +COPY credential-enumeration.nimble . +``` +With context `tests/docker/`, Docker looks for `tests/docker/src/`, +`tests/docker/config.nims`, `tests/docker/credential-enumeration.nimble`. +None exist — `find tests/docker/ -name "config.nims"` returns nothing. +The only test mechanism for this project has never run successfully with +this Justfile recipe. + +**Proof Check:** Confidence: HIGH — Docker build context semantics are deterministic; +this is not a maybe. 
+ +**Fix:** +`Justfile:88-89` — change the docker-build recipe to use the project root as context: +```just +[group('test')] +docker-build: + docker build -t credenum-test -f tests/docker/Dockerfile . +``` +And update `docker-test` accordingly (it depends on docker-build, so no change needed +there since it just `docker run`s the image). + +**Test:** +```bash +just docker-build +``` + +--- + +### Finding 2: matchesCommandPattern has case mismatch — 2/7 patterns are dead code +**Severity:** CRITICAL +**Axis:** Code Quality +**Files:** src/collectors/history.nim:38-54, src/config.nim:120-128 + +**Issue:** `matchesCommandPattern` lowercases the input line (`line.toLowerAscii()`) +then searches for pattern fragments that contain uppercase characters. Two patterns +are affected: + +- `"curl.*-H.*[Aa]uthoriz"` splits into `["curl", "-H", "[Aa]uthoriz"]` — + `-H` (uppercase) will never be found in a lowercased string, and + `[Aa]uthoriz` is treated as a literal (not a character class) +- `"wget.*--header.*[Aa]uthoriz"` splits into `["wget", "--header", "[Aa]uthoriz"]` — + `[Aa]uthoriz` is literal and will never appear in real history + +This means `curl -H "Authorization: Bearer ..."` commands in shell history +are silently missed — one of the most common credential-leaking patterns. + +**Proof:** Trace through `matchesCommandPattern` with input +`curl -H "Authorization: Bearer token" https://api.example.com`: +1. `lower` = `curl -h "authorization: bearer token" https://api.example.com` +2. Pattern `"curl.*-H.*[Aa]uthoriz"` → parts = `["curl", "-H", "[Aa]uthoriz"]` +3. `lower.find("curl")` → found at 0 +4. `lower.find("-H")` → NOT FOUND (lowercase string has `-h`, not `-H`) +5. `allFound = false` → returns false + +The pattern never matches. 
The planted test data in `.bash_history` line 4 +has `curl -H "Authorization: ..."` which should trigger this pattern but +the validate.sh check labeled "Sensitive command" passes only because +OTHER patterns (like `sshpass`, `mysql.*-p`) produce matches. + +**Proof Check:** Confidence: HIGH — Nim's `find` is case-sensitive by default; +this is deterministic. + +**Fix:** +`src/config.nim:120-128` — lowercase all pattern fragments: +```nim +HistoryCommandPatterns* = [ + "curl.*-h.*authoriz", + "curl.*-u ", + "wget.*--header.*authoriz", + "wget.*--password", + "mysql.*-p", + "psql.*password", + "sshpass" +] +``` + +**Test:** +Add a Docker test assertion that specifically validates curl -H Authorization +detection. After fix, run `just docker-test`. + +--- + +### Finding 3: Module header stats computed but never rendered +**Severity:** MAJOR +**Axis:** Code Quality +**Files:** src/output/terminal.nim:40-57 + +**Issue:** `renderModuleHeader` computes a `stats` string containing +the finding count and duration, but the padding calculation +`padLen - stats.len + stats.len` simplifies to just `padLen` — then +writes padding spaces without ever writing `stats` to stdout. +The finding count and per-module duration are silently dropped from output. + +**Proof:** The arithmetic: +```nim +let stats = $findingCount & " findings" & ColorDim & " (" & $durationMs & "ms)" & ColorReset +let padLen = 76 - name.len - desc.len - 5 +if padLen > 0: + stdout.write " ".repeat(padLen - stats.len + stats.len) # = " ".repeat(padLen) +stdout.writeLine " " & BoxVertical +``` +`stats` is never passed to `stdout.write`. The line is equivalent to +`stdout.write " ".repeat(padLen)` followed by the box border — no stats +anywhere. + +**Proof Check:** Confidence: HIGH — the variable is computed and never +appears in any write call in the function. 
+ +**Fix:** +`src/output/terminal.nim:51-55` — compute visual width (excluding ANSI codes), +pad to fill the box, then write stats: +```nim +proc visualLen(s: string): int = + var i = 0 + while i < s.len: + if s[i] == '\e': + while i < s.len and s[i] != 'm': + inc i + inc i + else: + inc result + inc i + +proc renderModuleHeader(name: string, desc: string, findingCount: int, durationMs: int64) = + try: + stdout.writeLine boxLine(78) + stdout.write BoxVertical & " " + stdout.write ColorBold & ColorCyan + stdout.write name.toUpperAscii() + stdout.write ColorReset + stdout.write ColorDim + stdout.write " " & Arrow & " " & desc + stdout.write ColorReset + + let stats = $findingCount & " findings" & ColorDim & " (" & $durationMs & "ms)" & ColorReset + let usedWidth = 2 + name.len + 3 + desc.len + let statsVisual = visualLen(stats) + let padLen = 78 - usedWidth - statsVisual - 2 + if padLen > 0: + stdout.write " ".repeat(padLen) + stdout.write stats + stdout.writeLine " " & BoxVertical + stdout.writeLine boxMid(78) + except CatchableError: + discard +``` + +**Test:** +```bash +just run --target /tmp | head -20 +``` +Verify module headers show "N findings (Xms)" right-aligned. + +--- + +### Finding 4: Terminal box right-border alignment broken for variable content +**Severity:** MAJOR +**Axis:** Code Quality +**Files:** src/output/terminal.nim:60-84, 98-126 + +**Issue:** `renderFinding` writes descriptions and paths of arbitrary length +then appends `" " & BoxVertical` with no padding to reach column 78. Long +descriptions push past the box. Short ones leave the right border floating +at different positions. Same issue in `renderSummary` — hardcoded +`" ".repeat(69)` and `" ".repeat(20)` assume fixed content widths that +vary with finding counts, module counts, and durations. + +**Proof:** A finding with path `/home/user/.config/google-chrome/Default/Login Data` +(49 chars) plus permissions `[0644]` plus modified timestamp is ~90+ chars of +content in a 78-char box. 
The right `BoxVertical` gets pushed to column ~95. +A finding with path `/home/user/.pgpass` (18 chars) leaves the right border +at ~column 50. + +**Proof Check:** Confidence: HIGH — the code has zero width calculation before +writing the trailing BoxVertical. + +**Fix:** +Create a `padWrite` helper that calculates visual width of content written so +far and pads to fill the 78-char box before writing the closing border. +Apply it to `renderFinding`, `renderSummary`, and `renderModuleErrors`. +Truncate content that would exceed box width. + +In `src/output/terminal.nim`, add the `visualLen` proc from Finding 3 +(shared), then refactor each line that writes content + BoxVertical: +```nim +proc padToBox(content: string, boxWidth: int = 78) = + let vLen = visualLen(content) + let pad = boxWidth - vLen - 1 + if pad > 0: + stdout.write " ".repeat(pad) + stdout.writeLine BoxVertical +``` + +Then each finding line becomes: +```nim +var line = BoxVertical & " " & sevBadge(f.severity) & " " & f.description +stdout.write line +padToBox(line) +``` + +Apply this pattern consistently to all content rows in the terminal renderer. + +**Test:** +```bash +just docker-test +``` +Visual inspection of terminal output — all right borders should align at column 78. + +--- + +### Finding 5: scanGitCredentials reports svHigh for empty credential files +**Severity:** MAJOR +**Axis:** Code Quality +**Files:** src/collectors/git.nim:11-39 + +**Issue:** If `.git-credentials` exists but is empty or contains no valid URLs, +`credCount` stays at 0 but the function still creates a finding with +"Plaintext Git credential store with 0 entries" at severity svHigh +(or svCritical if world-readable). An empty file is not a high-severity +credential exposure. + +**Proof:** Trace through `scanGitCredentials` with an empty `.git-credentials`: +1. `safeFileExists` returns true +2. `readFileLines` returns `@[]` +3. Loop runs zero iterations, `credCount = 0` +4. 
Code falls through to create credential and finding with `svHigh` +5. Report shows "Plaintext Git credential store with 0 entries" as HIGH + +**Proof Check:** Confidence: HIGH — there is no guard checking `credCount > 0` +before creating the finding. + +**Fix:** +`src/collectors/git.nim` — add early return after counting: +```nim +if credCount == 0: + return +``` +Insert after the for-loop that counts credentials (after line 22), before +the credential/finding construction. + +**Test:** +Create an empty `.git-credentials` file, run scanner, verify no git finding +appears. + +--- + +### Finding 6: `just test` references non-existent test_all.nim +**Severity:** MAJOR +**Axis:** Code Quality +**Files:** Justfile:84-85 + +**Issue:** The Justfile `test` recipe runs `nim c -r tests/test_all.nim`, +but this file does not exist. There are no unit tests in the project. +The only testing is Docker-based integration testing (validate.sh), which +itself is broken (Finding 1). + +**Proof:** `test -f tests/test_all.nim` returns non-zero. The `tests/` +directory contains only `docker/`. + +**Proof Check:** Confidence: HIGH — file does not exist. + +**Fix:** +Create `tests/test_all.nim` with unit tests for each collector's core logic. +At minimum, test: +- `isPrivateKey` with various key headers +- `isEncrypted` with encrypted/unencrypted markers +- `matchesSecretPattern` with positive and negative cases +- `matchesCommandPattern` (after fixing Finding 2) with all 7 patterns +- `redactValue` edge cases +- `permissionSeverity` logic +- `parseModules` from CLI parsing + +These should be fast, in-process tests that don't require Docker or +real credential files. 
+
+**Test:**
+```bash
+just test
+```
+
+---
+
+### Finding 7: Missing credential categories — .netrc, npm/pip tokens, Terraform, Vault, GitHub CLI
+**Severity:** MAJOR
+**Axis:** Completeness
+**Files:** src/config.nim, src/collectors/apptoken.nim
+
+**Issue:** The tool covers 7 categories but misses several high-value
+credential stores that a post-access operator would check:
+
+| Missing Target | Path | Why It Matters |
+|---|---|---|
+| `.netrc` | `~/.netrc` | Universal HTTP auth store; Heroku, Artifactory, many tools |
+| `.npmrc` | `~/.npmrc` | npm registry auth tokens (`_authToken=`) |
+| `.pypirc` | `~/.pypirc` | PyPI upload tokens |
+| GitHub CLI | `~/.config/gh/hosts.yml` | GitHub OAuth tokens |
+| Terraform | `~/.terraform.d/credentials.tfrc.json` | Terraform Cloud API tokens |
+| Vault | `~/.vault-token` | HashiCorp Vault root/user tokens |
+| Helm | `~/.config/helm/repositories.yaml` | Helm chart repo credentials |
+| rclone | `~/.config/rclone/rclone.conf` | Cloud storage credentials (S3, GCS, etc.) |
+
+Industry comparison: LaZagne (closest post-access tool) covers 20+
+credential categories on Linux alone. `truffleHog` detects 700+ secret
+patterns. This tool's 7 categories leave real coverage gaps.
+
+**Proof:** `grep -r "netrc\|npmrc\|pypirc\|vault-token\|terraform\|gh/hosts" src/`
+returns zero matches.
+
+**Proof Check:** Confidence: HIGH — the files are either scanned or they're not.
+
+**Fix:**
+Add constants to `src/config.nim`:
+```nim
+const
+  NetrcFile* = ".netrc"
+  NpmrcFile* = ".npmrc"
+  PypircFile* = ".pypirc"
+  GhCliHosts* = ".config/gh/hosts.yml"
+  TerraformCreds* = ".terraform.d/credentials.tfrc.json"
+  VaultTokenFile* = ".vault-token"
+  HelmRepos* = ".config/helm/repositories.yaml"
+  RcloneConf* = ".config/rclone/rclone.conf"
+```
+
+Add scanning logic to `src/collectors/apptoken.nim` — each is a simple
+file-exists-and-check-contents pattern, consistent with existing
+`scanDbCredFiles` approach. 
`.netrc` deserves content parsing (look for
+`password` or `login` tokens). `.npmrc` should check for `_authToken=`.
+`.pypirc` should check for `password` under `[pypi]` section.
+
+**Test:**
+Add planted files to `tests/docker/planted/` and assertions to `validate.sh`.
+
+---
+
+### Finding 8: matchesExclude uses substring matching, not glob patterns
+**Severity:** MINOR
+**Axis:** Code Quality
+**Files:** src/collectors/base.nim:90-94
+
+**Issue:** `matchesExclude` checks `if pattern in path` — plain substring.
+An exclude pattern of `"env"` would exclude `/home/user/.venv/something`,
+`/home/user/environment/data`, and the intended `.env` file. The CLI help
+says `--exclude <pattern>` suggesting glob behavior, but the implementation
+is substring containment.
+
+**Proof:** `matchesExclude("/home/user/.venv/lib/site.py", @["env"])`
+returns `true`, excluding a Python virtualenv file that has nothing to do
+with environment secrets.
+
+**Proof Check:** Confidence: HIGH — `in` is Nim's substring containment
+operator for strings.
+
+**Fix:**
+`src/collectors/base.nim:90-94` — use `std/os.extractFilename` and simple
+glob matching, or at minimum document that patterns are substrings. Better
+fix: use Nim's `std/strutils.contains` with path-segment awareness:
+```nim
+proc matchesExclude*(path: string, patterns: seq[string]): bool =
+  let name = path.extractFilename()
+  for pattern in patterns:
+    if pattern in name or pattern in path.splitPath().head:
+      return true
+```
+
+Or implement basic glob support with `*` matching.
+
+**Test:**
+Unit test that `.venv/lib/site.py` is NOT excluded by pattern `".env"`.
+
+---
+
+### Finding 9: JSON renderJson silently discards file-write errors
+**Severity:** MINOR
+**Axis:** Code Quality
+**Files:** src/output/json.nim:72-85
+
+**Issue:** When `--output <path>` specifies an invalid path (read-only dir,
+nonexistent parent), `writeFile` throws, the exception is caught and
+discarded. 
The JSON is then also written to stdout, but if stdout is +redirected and also fails, both errors are silently swallowed. The user +gets zero indication that their requested output file was not created. + +**Proof:** Run `credenum --format json --output /root/nope.json` as +non-root — the file write fails silently, output goes only to stdout. +If stdout is piped to a broken pipe, both writes fail and the user +sees nothing. + +**Proof Check:** Confidence: MEDIUM — the stdout fallback usually works, +so the practical impact is limited to the file path case. + +**Fix:** +`src/output/json.nim:77-80` — write a warning to stderr on file write failure: +```nim +except CatchableError as e: + try: + stderr.writeLine "Warning: could not write to " & outputPath & ": " & e.msg + except CatchableError: + discard +``` + +**Test:** +```bash +just run --format json --output /dev/full 2>&1 | grep "Warning" +``` + +--- + +### Finding 10: redactLine strips leading quote but keeps trailing quote +**Severity:** MINOR +**Axis:** Code Quality +**Files:** src/collectors/history.nim:15-28 + +**Issue:** `redactLine` strips a leading `"` or `'` from the value via +`value[1 .. ^1]`, but `^1` is the last index in Nim (inclusive), so +this removes only the first character. Input `"secret"` becomes +`secret"` — the trailing quote survives into the redacted preview. + +**Proof:** Input line `export API_KEY="mysecret"`: +1. `eqIdx` = 14 (position of `=`) +2. `value` = `"mysecret"` (after strip) +3. `value.startsWith("\"")` → true +4. `cleanValue` = `value[1 .. ^1]` = `mysecret"` (trailing quote kept) +5. `redactValue("mysecret\"", 4)` = `myse****"` + +**Proof Check:** Confidence: HIGH — `^1` is the last character in Nim slice +notation; this is deterministic. 
+ +**Fix:** +`src/collectors/history.nim:24-26`: +```nim +let cleanValue = if (value.startsWith("\"") and value.endsWith("\"")) or + (value.startsWith("'") and value.endsWith("'")): + value[1 ..< ^1] +else: + value +``` + +Note: `^1` in `[1 ..< ^1]` excludes the last character (half-open range). + +**Test:** +Unit test: `redactLine("export KEY=\"secret\"")` should produce `KEY=secr**` +with no trailing quote. + +--- + +### Finding 11: isRelative computed but unused in Firefox profile parsing +**Severity:** MINOR +**Axis:** Code Quality +**Files:** src/collectors/browser.nim:11-48 + +**Issue:** The `scanFirefox` proc parses `IsRelative=0` from profiles.ini +and stores it in `isRelative`, but this variable is never read. Profile +path resolution uses `profile.startsWith("/")` instead. The variable is +dead code from an abandoned design path. + +**Proof:** `isRelative` is set on lines 23 and 37, but never appears in +any conditional or expression after the parsing loop. + +**Proof Check:** Confidence: HIGH — grep for `isRelative` in browser.nim +shows only assignments, zero reads. + +**Fix:** +`src/collectors/browser.nim` — remove the `isRelative` variable entirely +(lines 23, 37). The `startsWith("/")` check on line 43 is sufficient for +Linux path detection. + +**Test:** +```bash +just check +``` +Verify compilation succeeds with no warnings about unused variable. + +--- + +### Finding 12: Azure scanner adds directory finding unconditionally +**Severity:** MINOR +**Axis:** Code Quality +**Files:** src/collectors/cloud.nim:140-144 + +**Issue:** `scanAzure` always adds an svInfo finding for the Azure CLI +directory after checking for specific token files. If token cache findings +were already added, this creates redundant noise. If no tokens were found, +a bare directory finding at svInfo adds very little value. + +**Proof:** If `~/.azure/` exists with `accessTokens.json`, the output shows: +1. "Azure token cache" at svMedium — useful +2. 
"Azure CLI configuration directory" at svInfo — noise, adds nothing + +**Proof Check:** Confidence: MEDIUM — it's noise, not incorrect data. Could +argue the directory finding is useful as a "this user has Azure CLI installed" +signal, but only if no token files were found. + +**Fix:** +`src/collectors/cloud.nim:140-144` — only add the directory finding if no +token files were found: +```nim +if result.findings.len == 0 or + result.findings[^1].category != catCloud: + result.findings.add(makeFinding( + azDir, + "Azure CLI configuration directory", + catCloud, svInfo + )) +``` + +Better: track whether any Azure-specific findings were added and only emit +the directory finding as a fallback. + +**Test:** +Docker test — verify Azure directory finding only appears when no token +findings exist. + +--- + +## Self-Interrogation + +Looking at these 12 findings as a whole: + +- **Did I miss a dimension?** The tool has no rate-limiting or size-limiting on + file reads. `readFileContent` reads entire files into memory. A malicious + (or just large) `.bash_history` of several GB would cause OOM. But the + history scanner has `MaxHistoryLines = 50000` via `readFileLines`, which + mitigates this for its use case. Other collectors reading full files + (git config, kubeconfig) are typically small. Not worth a finding. + +- **Are any findings weak?** Finding 12 (Azure directory) is the weakest — + it's a UX preference, not a bug. Keeping it as MINOR is appropriate. + Finding 11 (dead variable) is real but trivial. Everything MAJOR and above + is solid. + +- **Completeness check:** The tool has 7 modules covering the major + categories but Finding 7 lists 8 specific credential stores that any + practitioner would expect. The `.netrc` omission alone is notable since + it's been the standard Unix credential store since the 1980s. 
+ +## Summary + +**Total Findings:** 12 (2 critical, 5 major, 5 minor) +**Code Quality Findings:** 11 +**Completeness Findings:** 1 diff --git a/docs/superpowers/specs/2026-04-01-credential-enumeration-design.md b/docs/superpowers/specs/2026-04-01-credential-enumeration-design.md new file mode 100644 index 00000000..959bca97 --- /dev/null +++ b/docs/superpowers/specs/2026-04-01-credential-enumeration-design.md @@ -0,0 +1,257 @@ +# Credential Enumeration Tool — Design Spec + +## Overview + +A post-access credential enumeration tool written in Nim that scans Linux systems for exposed secrets across 7 categories. Compiles to a single static binary with zero dependencies — drop on target, run, get a structured report of every credential file, its exposure level, and severity rating. + +**Language:** Nim 2.2.x +**Binary name:** `credenum` +**Architecture:** Modular collector pattern — one module per credential category, common interface, central runner + +--- + +## Core Types (`src/types.nim`) + +- **Severity** — enum: `info`, `low`, `medium`, `high`, `critical` +- **Category** — enum: `browser`, `ssh`, `cloud`, `history`, `keyring`, `git`, `apptoken` +- **Credential** — discovered credential data (source, credential type, value or redacted preview, metadata) +- **Finding** — a single discovery (path, category, severity, description, optional Credential, file permissions, timestamps) +- **CollectorResult** — `seq[Finding]` + collector metadata (name, duration, errors encountered) +- **HarvestConfig** — runtime configuration (target home dir, enabled modules, exclude patterns, output format, flags) +- **Report** — all collector results + summary stats + timestamp + target info + +**Severity assignment rules:** +- Critical: plaintext credentials in world-readable files +- High: unprotected private keys, plaintext credential stores +- Medium: overly permissive file permissions on credential files +- Low: credential files exist but properly permissioned +- Info: enumeration 
data (host lists, profile counts, existence checks) + +--- + +## Collector Modules + +Each module exports `proc collect(config: HarvestConfig): CollectorResult`. The runner calls each in sequence. No inheritance needed — just a common return type and a seq of collector procs populated at init. + +### 1. Browser Credential Store Scanner (`src/collectors/browser.nim`) +- Firefox: locate profiles via `profiles.ini`, check `logins.json`, `cookies.sqlite`, `key4.db` +- Chromium: locate `Login Data`, `Cookies`, `Web Data` SQLite databases +- Report: file locations, permissions, entry counts, last-modified timestamps +- Flag world-readable/group-readable databases as critical +- Detection + metadata level (no decryption) + +### 2. SSH Key & Config Auditor (`src/collectors/ssh.nim`) +- Scan `~/.ssh/` for private keys (RSA, Ed25519, ECDSA, non-standard filenames) +- Read key headers to determine passphrase protection (encrypted PEM vs unencrypted) +- Flag unprotected keys as high severity +- Check permissions (keys=600, directory=700) +- Parse `~/.ssh/config` — enumerate hosts, identify weak settings +- Read `authorized_keys` and `known_hosts` for enumeration + +### 3. Cloud Provider Config Scanner (`src/collectors/cloud.nim`) +- AWS: `~/.aws/credentials`, `~/.aws/config` — count profiles, identify static vs session keys +- GCP: `~/.config/gcloud/` — application default credentials, service account keys +- Azure: `~/.azure/` — access tokens, profile info +- Kubernetes: `~/.kube/config` — enumerate contexts, clusters, auth methods +- Permission checks, flag anything broader than owner-only + +### 4. Shell History & Environment Scanner (`src/collectors/history.nim`) +- Read `.bash_history`, `.zsh_history`, `.fish_history` +- Pattern match for inline secrets: KEY=, SECRET=, TOKEN=, PASSWORD= exports, DB connection strings, curl/wget with auth headers +- Scan for `.env` files in home directory tree +- Report: file, line region, redacted preview + +### 5. 
Keyring & Password Store Scanner (`src/collectors/keyring.nim`) +- GNOME Keyring: `~/.local/share/keyrings/` +- KDE Wallet: `~/.local/share/kwalletd/` +- KeePass/KeePassXC: search for `.kdbx` files +- pass (password-store): `~/.password-store/` +- Bitwarden: `~/.config/Bitwarden/` local vault data +- Report locations, file sizes, permissions, last modified + +### 6. Git Credential Store Scanner (`src/collectors/git.nim`) +- `~/.git-credentials` — plaintext storage (high severity) +- `~/.gitconfig` — check `credential.helper` setting +- Search for credential cache socket files +- Check for GitHub/GitLab PATs in config files + +### 7. Application Token Scanner (`src/collectors/apptoken.nim`) +- Slack: `~/.config/Slack/` session/cookie storage +- Discord: `~/.config/discord/` token storage +- VS Code: `~/.config/Code/` stored secrets +- Database configs: `~/.pgpass`, `~/.my.cnf`, Redis configs +- MQTT broker configs, common application credential files + +--- + +## CLI Interface + +``` +credenum [flags] + +Flags: + --target Target user home directory (default: current user) + --modules Comma-separated module list (default: all) + --exclude Glob patterns for paths to skip + --format Output format: terminal, json, both (default: terminal) + --output Write JSON output to file + --dry-run List paths that would be scanned without reading + --quiet Suppress banner and progress, output findings only + --verbose Show all scanned paths, not just findings +``` + +**CLI parsing:** `std/parseopt` (stdlib, no dependencies) + +--- + +## Terminal Output Design + +Hacker-aesthetic terminal output: +- ASCII art banner with tool name and version +- Box-drawing characters for section borders +- Color-coded severity badges (critical=red, high=magenta, medium=yellow, low=cyan, info=dim) +- Clean table formatting for findings +- Summary footer with totals by severity, modules scanned, duration +- Progress indicators showing which module is currently scanning + +--- + +## Output Formats + 
+### Terminal (ANSI) +Colored, formatted output designed for interactive use. Banner, per-module sections, severity badges, summary. + +### JSON +Structured report: +```json +{ + "metadata": { "timestamp": "...", "target": "...", "version": "...", "duration_ms": 0 }, + "modules": [ + { + "name": "ssh", + "findings": [ + { + "category": "ssh", + "severity": "high", + "path": "/home/user/.ssh/id_rsa", + "description": "Unprotected private key (no passphrase)", + "permissions": "0644", + "modified": "2026-01-15T10:30:00Z" + } + ], + "duration_ms": 12, + "errors": [] + } + ], + "summary": { "critical": 2, "high": 5, "medium": 8, "low": 3, "info": 12 } +} +``` + +--- + +## Build & Distribution + +### Static binary via musl +- `config.nims` configures musl-gcc for fully static Linux binaries +- Zero runtime dependencies + +### Cross-compilation +- x86_64-linux (primary) +- aarch64-linux (ARM64) +- Uses zig cc for cross-compilation +- Justfile tasks: `just build-x86`, `just build-arm64` + +### Build modes +- `just build` — debug build with all checks +- `just release` — optimized static binary (`-d:release -d:lto --opt:size`) +- `just release-small` — stripped + UPX compressed + +### Justfile tasks +- `just build` / `just release` / `just release-small` +- `just test` — run unit tests +- `just docker-test` — build + run in Docker test environment +- `just fmt` — format with nph +- `just clean` + +--- + +## Docker Test Environment + +**`tests/docker/Dockerfile`** — Ubuntu-based container planting fake credentials across all 7 categories: + +- SSH: test key pairs (some protected, some not), various permissions +- Browser: mock Firefox profile with dummy `logins.json`, mock Chromium dirs +- Cloud: fake AWS credentials, dummy GCP service account JSON, mock kubeconfig +- History: seeded `.bash_history`/`.zsh_history` with fake tokens +- Keyrings: mock `.kdbx`, mock `pass` store +- Git: `.git-credentials` with dummy entries +- App tokens: mock Slack/Discord/VS Code configs, 
`.pgpass`, `.my.cnf` + +All values are obviously fake (`AKIA_FAKE_ACCESS_KEY_12345`). + +`just docker-test` builds, runs credenum inside, validates all findings discovered with correct severity. + +--- + +## Project Structure + +``` +credential-enumeration/ +├── src/ +│ ├── harvester.nim # Entry point, CLI parsing +│ ├── config.nim # Constants, paths, patterns, severities +│ ├── types.nim # Core types +│ ├── runner.nim # Execute collectors, aggregate results +│ ├── output/ +│ │ ├── terminal.nim # ANSI terminal output with hacker aesthetic +│ │ └── json.nim # JSON serialization +│ └── collectors/ +│ ├── base.nim # Collector registration +│ ├── browser.nim +│ ├── ssh.nim +│ ├── cloud.nim +│ ├── history.nim +│ ├── keyring.nim +│ ├── git.nim +│ └── apptoken.nim +├── tests/ +│ └── docker/ +│ ├── Dockerfile +│ └── planted/ # Mock credential files +├── learn/ +│ ├── 00-OVERVIEW.md +│ ├── 01-CONCEPTS.md +│ ├── 02-ARCHITECTURE.md +│ ├── 03-IMPLEMENTATION.md +│ └── 04-CHALLENGES.md +├── config.nims # Build config (static linking, cross-compile) +├── credential-enumeration.nimble # Package manifest +├── Justfile +├── install.sh +├── README.md +├── LICENSE +└── .gitignore +``` + +--- + +## Learn Folder + +- **00-OVERVIEW.md** — What credential enumeration is, why it matters, prerequisites, quick start +- **01-CONCEPTS.md** — Linux credential storage locations, file permission model, where apps store secrets and why defaults are insecure. Real-world breach references. 
+- **02-ARCHITECTURE.md** — Modular collector design, data flow, why Nim for security tooling +- **03-IMPLEMENTATION.md** — Code walkthrough: core types, collector pattern, CLI parsing, output formatting, Nim type system and modules +- **04-CHALLENGES.md** — Extensions: new collectors, encrypted output, network enumeration, framework integration + +--- + +## What This Project Teaches + +- Linux credential storage locations across browsers, SSH, cloud tools, shells, keyrings, Git, and applications +- File permission models and their security implications +- Nim programming: static compilation, module system, type system, FFI potential +- Why Nim is adopted in the security assessment community (small static binaries, C-level performance) +- Modular tool architecture with common interfaces +- Building visually polished CLI tools +- Docker-based testing for security tools +- Cross-compilation and static linking for portable binaries