Commit to personal repository

shaofeng 6 months ago
parent
commit
2d2ad3c59a

BIN
.DS_Store


BIN
.cursor/.DS_Store


+ 161 - 0
.cursor/rules/hoon.mdc

@@ -0,0 +1,161 @@
+---
+description: hoon
+globs: 
+alwaysApply: false
+---
+{
+  "rules": [
+    {
+      "title": "Hoon Runic Structure and Syntax",
+      "description": "Hoon is a runic language where expressions begin with digraph runes corresponding to specific operation types. Always use proper rune syntax (e.g., |= for gates, ?: for if-then-else, =/ for variable assignment, |- for trap creation) and ensure correct positioning and indentation. The syntax is large but regular, forming a consistent pattern language."
+    },
+    {
+      "title": "Hoon Code Vertical Backbone",
+      "description": "Maintain Hoon's vertical 'backbone' structure that prevents deep indentation and improves readability. This structure helps quickly identify control flow and makes efficient use of screen real estate. Avoid excessive horizontal nesting and use the backbone pattern to create scannable, maintainable code."
+    },
+    {
+      "title": "Hoon Fundamental Data Structures",
+      "description": "Cells are the core data structure in Hoon (and Nock), represented as [a b] pairs. Build n-tuples with [a b c ...] syntax (sugar for :* coltar). Create lists with ~[a b c] syntax (sugar for :~ colsig). The only fundamental datatype is the noun (atom or cell), with all other structures derived from these basics."
+    },
+    {
+      "title": "Hoon Atoms and Auras",
+      "description": "Atoms are unsigned integers with auras (metadata type tags). Common auras include @ud (decimal), @ux (hex), @p (ship name), @t (UTF-8 text), and @da (absolute date). Aura literal syntax varies: 123 for @ud, 0x123 for @ux, ~sampel-palnet for @p, 'text' for @t, and ~2022.1.1 for @da."
+    },
+    {
+      "title": "Hoon Subject-Oriented Programming",
+      "description": "Hoon is subject-oriented with no implicit environment. Code executes against a subject (data context) containing all variables in scope. Use . (dot) to reference the entire subject, - (hep) for the head, + (lus) for the tail. Access nested data with wing syntax (a.b.c). Use ^ (ket) prefix to skip a layer in name resolution. Always consider the subject when designing functions."
+    },
+    {
+      "title": "Hoon Type System",
+      "description": "Hoon uses molds (type validators/generators) for static typing. Define structures with $: (buccol), unions with $? (bucwut), and example-based types with $_ (buccab). Use ^- (kethep) or `type` syntax for typecasting. Irregular forms include ?(%a %b %c) for unions and [a=@ b=@] for structures with faces. All code must use static typing correctly."
+    },
+    {
+      "title": "Hoon Gates and Cores",
+      "description": "Functions in Hoon are 'gates' (one-armed cores). Create gates with |= (bartis) rune. Cores (code/data bundles) are created with |% (barcen) and contain multiple arms (named computations). Call gates with (gate arg) syntax or %- (cenhep) rune. Create doors (cores with sample) using |_ (barcab) for object-like structures with methods."
+    },
+    {
+      "title": "Hoon Irregular Syntax",
+      "description": "Use idiomatic irregular syntax when appropriate: $ for recursion, ` for typecasting, . for nock operations, , for toggling structure mode, and wing syntax (a.b.c) for accessing nested data. Never mix regular and irregular forms for the same operation within a single codebase to maintain consistency. Hoon does not allow custom syntax or macros."
+    },
+    {
+      "title": "Hoon Error Handling",
+      "description": "Handle errors with ?: (wutcol) for conditionals, ?~ (wutsig) for null checks, and ?@ (wutpat) for atom/cell tests. Use !. (zapzap) for crash with trace, !> (zapgar) for runtime type checking, and %= (centis) for change-subject operations. Always validate input data and handle edge cases explicitly."
+    },
+    {
+      "title": "Hoon Maps and Sets",
+      "description": "Use maps (key-value) with %- ~(put by map) [key value] syntax to add entries. Sets use ~(put in set) element syntax. Maps and sets are implemented as trees. For nested maps (mips), follow the ++mi (map of maps) interface in the standard library to maintain data integrity."
+    },
+    {
+      "title": "Hoon Standard Library Usage",
+      "description": "Leverage the standard library (+) for common operations. Use +roll for list reduction, +turn for mapping, +skim for filtering, +weld for concatenation, and +snag for indexing. Prefer standard library functions over custom implementations for better maintainability and performance."
+    },
+    {
+      "title": "Hoon Text Processing",
+      "description": "Text in Hoon is represented by cords (@t, UTF-8 atoms) or tapes (lists of @tD chars). Use (trip cord) to convert cord to tape, and (crip tape) for tape to cord. Text parsing is done with the standard library's parsing utilities like ;~ (micsig) combinator and parsers like (jest 'text') for literal matching."
+    },
+    {
+      "title": "Hoon Serialization",
+      "description": "Use +jam for serializing nouns to atoms and +cue for deserializing. When working with external systems, use +en:json:html for JSON encoding and +de:json:html for decoding. Always validate deserialized data before use. For over-the-wire formats, follow established Arvo conventions."
+    },
+    {
+      "title": "Hoon Cryptography Standards",
+      "description": "Use +crub:crypto for Suite B cryptography, +ed:crypto for elliptic curve operations, and +aes:crypto for symmetric encryption. For hashing, use +shax:crypto (SHA-256), +shaz:crypto (SHA-512), and +sham for insecure hashing. Follow cryptographic best practices and maintain key security."
+    },
+    {
+      "title": "Hoon Testing Conventions",
+      "description": "Write tests using %- expect:tests to compare expected vs. actual values. Group related tests into cores with arms for each test case. Tests should be deterministic and cover edge cases. Use minimal reproducible examples when testing complex functionality."
+    },
+    {
+      "title": "Hoon Documentation Standards",
+      "description": "Document code with :: single-line comments and ::: block comments. Every arm should have a brief comment explaining its purpose, inputs, and outputs. Use standard docblock format: arm name, input types, output type, and description. Keep comments up-to-date with code changes."
+    },
+    {
+      "title": "Nockchain Performance Optimization - Parallel Processing",
+      "description": "Implement multi-core mining strategies in Hoon by designing algorithms that can distribute work across cores. Use explicit nonce range partitioning to allow different CPU cores to work on separate ranges simultaneously. Where Hoon's single-threaded nature limits parallelism, delegate computationally intensive tasks to the Rust side which can utilize multiple threads efficiently."
+    },
+    {
+      "title": "Nockchain Performance Optimization - Memory Management",
+      "description": "Optimize memory usage in Hoon mining algorithms by minimizing allocation of large nouns, reusing data structures where possible, and leveraging in-place operations. For large data structures, consider using maps instead of lists for faster lookups. Avoid deep copying of large structures when passing data between functions, and use face-less patterns where appropriate to reduce memory overhead."
+    },
+    {
+      "title": "Nockchain Performance Optimization - Computation Caching",
+      "description": "Implement strategic caching for expensive computations in mining algorithms. Use ^~ (ketwut) for compile-time caching of constant values. For runtime caching, maintain a memoization map for frequently computed values. When designing proof-of-work algorithms, prefer incremental computation patterns that build on previous results rather than recomputing from scratch."
+    },
+    {
+      "title": "Nockchain Performance Optimization - Rust/Hoon Interop",
+      "description": "Maximize CPU utilization by implementing performance-critical mining algorithms in Rust while using Hoon for coordination and blockchain logic. Design clear interfaces between languages with efficient data marshalling. For PoW operations, partition work in Hoon but delegate intensive computation to Rust. Monitor CPU utilization and move hotspots from Hoon to Rust when profiling shows performance bottlenecks."
+    },
+    {
+      "title": "Nockchain Performance Optimization - Algorithm Efficiency",
+      "description": "Optimize mining algorithms by focusing on computational efficiency. Implement early termination conditions to avoid unnecessary work. Use binary search patterns instead of linear scans. For hash calculations, batch operations where possible and minimize intermediate allocations. Carefully profile mining code to identify hotspots, and redesign algorithms to reduce computational complexity rather than just optimizing implementation details."
+    },
+     {
+      "title": "Hoon Axioms and Principles",
+      "description": "Hoon is statically typed, purely functional, and strictly evaluated. It's axiomatic, minimalist, acyclic, homoiconic, and universally serializable. All code must be pure functions without side effects. Every noun must be either an atom (natural number) or a cell (pair of nouns). Every operation must be expressible within these constraints."
+    },
+    {
+      "title": "Runic Syntax Structure",
+      "description": "Hoon uses runes (digraph symbols) to represent operations. Always maintain proper rune spacing and indentation. Each rune has a fixed number of children with defined roles. Common runes include: |= (bartis) for gates, %- (cenhep) for function calls, =/ (tisfas) for variable assignment, and ?: (wutcol) for if-then-else. Never mix rune syntax styles within a codebase."
+    },
+    {
+      "title": "Vertical Backbone Pattern",
+      "description": "Structure Hoon code with a vertical 'backbone' of runes for clarity and maintainability. This pattern prevents deep indentation and helps identify control flow. Place the main runes at the left margin and indent their children consistently. Multi-line expressions should align subordinate runes to form a clear visual hierarchy."
+    },
+    {
+      "title": "Subject-Oriented Programming",
+      "description": "All Hoon code executes against a subject (data context). The subject contains everything in scope. Design functions with clear subject dependencies. Use . (dot) to reference the entire subject, - (hep) for head, + (lus) for tail. Access nested data with wing syntax (a.b.c). Explicitly pin values with =/ when needed rather than relying on deep wing paths."
+    },
+    {
+      "title": "Core Structure and Organization",
+      "description": "Organize code into cores (code and data pairs). Use |% (barcen) for library cores, |= (bartis) for gates (functions), and |_ (barcab) for doors (objects). Structure complex applications as stacked cores with clear hierarchies. Define public interfaces at the top level and implementation details in deeper arms."
+    },
+    {
+      "title": "Type System Usage",
+      "description": "Leverage Hoon's static type system for safety and documentation. Define structures with $: (buccol), unions with $? (bucwut), and type examples with $_ (buccab). Use ^- (kethep) for typecasting. Ensure all code has appropriate type annotations. For polymorphic code, use appropriate wet or dry gates. Define custom types for domain-specific data rather than using generic nouns."
+    },
+    {
+      "title": "Standard Library and Zuse",
+      "description": "Use standard library functions whenever possible instead of reimplementing functionality. Common library arms include: +turn for mapping, +roll for folding, +skim for filtering, +weld for concatenation, and +snag for indexing. Prefer library functions for better maintainability, performance (jets), and consistency. Import from Zuse (/= zuse /sys/vane/zuse) for additional system utilities."
+    },
+    {
+      "title": "Error Handling and Validation",
+      "description": "Handle errors explicitly with ?: (wutcol) for conditionals, ?~ (wutsig) for null checks, and ?@ (wutpat) for atom/cell tests. Use !. (zapzap) for crash with trace, !> (zapgar) for runtime type checking. Always validate input data using type checking and custom validation rules. For operations that might fail, use unit types to indicate possible failure."
+    },
+    {
+      "title": "Performance Optimization",
+      "description": "Optimize performance by using jets where appropriate. Use ^~ (ketwut) to cache computed values. Minimize memory allocation by reusing structures and avoiding deep copies. For computationally intensive tasks, consider alternate algorithms that reduce complexity. Use +jam and +cue efficiently for serialization. Batch operations where possible to reduce overhead."
+    },
+    {
+      "title": "Documentation Standards",
+      "description": "Document code with :: single-line comments and ::: block comments. Every arm should have a comment explaining its purpose, inputs, outputs, and any side effects. Follow the standard format: arm name, input types, output type, and description. Document types with meaningful names and docstrings. Keep comments up-to-date with code changes."
+    },
+    {
+      "title": "Homoiconicity and Metaprogramming",
+      "description": "Leverage Hoon's homoiconic nature for metaprogramming. Code and data have the same representation and can be converted to each other. When writing metaprogramming code, ensure the generated syntax closely mirrors the intended AST structure. Use !> (zapgar) to produce vases for dynamic programming. For dynamic code execution, use +mule for safe evaluation."
+    },
+    {
+      "title": "Data Structure Selection",
+      "description": "Choose appropriate data structures for the task. Use lists for ordered sequences, sets for unique collections, maps for key-value pairs, and jars/jugs for multimap patterns. For large collections, prefer maps over lists for O(log n) lookups. Use trees for hierarchical data. Select the structure that minimizes algorithmic complexity for your most common operations."
+    },
+    {
+      "title": "Text and Serialization",
+      "description": "Handle text using cords (@t, UTF-8 atoms) for compact storage or tapes (lists of @tD chars) for manipulation. Use (trip cord) to convert cord to tape, and (crip tape) for tape to cord. For data serialization, use +jam for compact binary format and +en:json:html for JSON interchange. Always validate deserialized data before use."
+    },
+    {
+      "title": "Cryptography and Security",
+      "description": "Use Hoon's cryptographic libraries securely. Prefer +crub:crypto for Suite B cryptography, +ed:crypto for elliptic curve operations, and +aes:crypto for symmetric encryption. For hashing, use +shax:crypto (SHA-256), +shaz:crypto (SHA-512), and never use +sham for security-critical hashing. Follow cryptographic best practices and maintain key security."
+    },
+    {
+      "title": "Testing and Verification",
+      "description": "Write comprehensive tests for all code. Use %- expect:tests to compare expected vs. actual values. Group related tests into cores with arms for each test case. Tests should be deterministic and cover edge cases. For complex functions, test both normal cases and boundary conditions. Use +mule to catch and test for expected crashes."
+    },
+    {
+      "title": "Resource Management",
+      "description": "Manage resources efficiently given Hoon's purely functional nature. For operations on large data, process incrementally to avoid memory pressure. When dealing with system resources in Arvo, follow request/response patterns and properly track outstanding work. Clean up resources explicitly when operations complete, even in error cases."
+    },
+    {
+      "title": "Nockchain Mining Optimization",
+      "description": "For blockchain operations, optimize mining code for maximum performance. Implement parallel processing where possible using explicit work distribution. Cache intermediate results to avoid redundant computation. For proof-of-work algorithms, use incremental verification. Balance work between Hoon (for coordination) and Rust (for intensive computation) to maximize CPU utilization."
+    }
+  ]
+}

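The parallel-processing rules above (explicit nonce range partitioning, delegating heavy work to the Rust side) can be illustrated with a small, self-contained Rust sketch. The `meets_target` check, the core count, and the range size below are placeholders; the real difficulty check lives in hoon/common/pow.hoon and the mining kernel.

```rust
use std::sync::mpsc;
use std::thread;

/// Placeholder difficulty check; the real proof-of-work test lives in
/// hoon/common/pow.hoon and the mining kernel, not here.
fn meets_target(nonce: u64) -> bool {
    nonce % 1_000_003 == 7
}

fn main() {
    let cores = 4; // e.g. num_cpus::get() in the real driver
    let range_size: u64 = 10_000;
    let (tx, rx) = mpsc::channel();

    // Give each worker a disjoint nonce range, as the partitioning rule suggests.
    let workers: Vec<_> = (0..cores)
        .map(|id| {
            let tx = tx.clone();
            thread::spawn(move || {
                let start = id as u64 * range_size;
                for nonce in start..start + range_size {
                    if meets_target(nonce) {
                        let _ = tx.send((id, nonce));
                        return;
                    }
                }
            })
        })
        .collect();

    drop(tx); // so recv() unblocks if no worker finds anything
    if let Ok((id, nonce)) = rx.recv() {
        println!("worker {id} found candidate nonce {nonce}");
    } else {
        println!("no worker found a candidate in its range");
    }
    for w in workers {
        let _ = w.join();
    }
}
```
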
+ 173 - 0
.cursor/rules/nockchain-rule.mdc

@@ -0,0 +1,173 @@
+---
+description: Nockchain
+globs: 
+alwaysApply: false
+---
+{
+  "rules": [
+    {
+      "title": "Project Architecture Overview",
+      "description": "Nockchain is a lightweight blockchain platform for verifiable heavyweight applications. The core philosophy is to replace verifiability-via-public-replication with verifiability-via-private-proving. The project is implemented using both Rust and Hoon languages, with Rust handling system infrastructure and Hoon managing core logic and state."
+    },
+    {
+      "title": "Core Technology Stack",
+      "description": "Core technologies include: 1) Nock Virtual Machine - based on the Nock computation model for on-chain execution; 2) Zero-Knowledge Proofs (ZKP) - using STARK proof systems for off-chain computation verification; 3) Hoon Language - functional programming language from the Urbit ecosystem for smart contracts and core logic; 4) P2P Network - peer-to-peer network implemented with libp2p; 5) Blockchain Primitives - including consensus mechanisms, mining, transaction validation, etc."
+    },
+    {
+      "title": "Rust Code Structure",
+      "description": "Rust code is organized into multiple crates: 1) nockchain - main blockchain node implementation; 2) nockchain-wallet - wallet functionality; 3) nockvm - Nock virtual machine implementation; 4) nockapp - application layer interfaces; 5) zkvm-jetpack - ZKP verification system; 6) hoonc - Hoon compiler; 7) kernels - core logic kernels; 8) nockchain-bitcoin-sync - Bitcoin blockchain synchronization; 9) nockchain-libp2p-io - network layer implementation."
+    },
+    {
+      "title": "Hoon Code Structure",
+      "description": "Hoon code is primarily organized in the hoon/ directory, including: 1) apps/ - core applications like dumbnet (blockchain core) and wallet; 2) common/ - shared libraries, including ZKP-related code (ztd, zose, etc.); 3) dat/ - data definitions; 4) constraints/ - constraint systems; 5) test-jams/ - test cases."
+    },
+    {
+      "title": "Mining System",
+      "description": "The mining system is implemented in two parts: 1) Rust side (crates/nockchain/src/mining.rs) - handling driver creation, mining attempts, key management; 2) Hoon side (hoon/apps/dumbnet/miner.hoon) - handling mining scheduling and core algorithms. The system supports both single key mining and advanced multi-key configurations."
+    },
+    {
+      "title": "ZKP System",
+      "description": "The ZKP system is primarily implemented in the zkvm-jetpack crate, using STARK proof technology. Core components include: 1) Base field arithmetic (form/math/base.rs); 2) Polynomial operations (form/poly.rs); 3) TIP5 hash function (form/math/tip5.rs); 4) Prover implementation (hoon/common/stark/prover.hoon); 5) Verifier implementation (hoon/common/ztd)."
+    },
+    {
+      "title": "Cross-language Interaction",
+      "description": "Rust and Hoon interact through noun data structures and the poke/wire mechanism. Rust sends mining requests and data to the Hoon kernel and receives effects as results. The protocol ensures type and tag consistency between both sides. All data exchanged must be encoded as noun structures."
+    },
+    {
+      "title": "Wallet System",
+      "description": "The wallet system is implemented in the nockchain-wallet crate, providing key generation, transaction signing, balance queries, and other functionality. It uses a UTXO-based model and supports BIP32 key derivation and BIP39 mnemonic phrases."
+    },
+    {
+      "title": "Consensus Mechanism",
+      "description": "The system uses a Proof of Work (PoW) consensus mechanism combined with longest-chain rule. Core implementation is in hoon/apps/dumbnet/lib/consensus.hoon. Block validation includes hash verification, parent block validation, PoW verification, and transaction validity checks."
+    },
+    {
+      "title": "Coding Style Guidelines",
+      "description": "Rust code must be formatted using rustfmt and follow snake_case naming conventions. Hoon code must follow the official Urbit Hoon code style guide, maintaining proper indentation and commenting conventions. All code optimizations and refactoring must strictly follow the project's language conventions and style guides."
+    },
+     {
+      "title": "Hoon-Rust Communication Architecture",
+      "description": "The Nockchain project uses a bidirectional communication architecture between Rust and Hoon components. Rust serves as the infrastructure layer providing I/O capabilities, while Hoon serves as the logic and state management layer. Communication flows through a structured 'poke/wire' mechanism, where Rust components 'poke' the Hoon kernel with instructions and data, and receive effects (results) back."
+    },
+    {
+      "title": "Wire Protocol",
+      "description": "All communication between Rust and Hoon uses a standardized wire protocol implemented through the 'Wire' trait in Rust. Each module that needs to communicate with the Hoon kernel must implement the Wire trait, which requires defining a SOURCE, VERSION, and to_wire() method that creates a WireRepr structure. The WireRepr contains source identifier, version number, and optional tags for additional context."
+    },
+    {
+      "title": "Poke Mechanism",
+      "description": "The 'poke' mechanism is the primary way Rust sends data to Hoon. A poke operation involves creating a NounSlab (data container), setting it up with appropriate Nock data structures, and sending it through a channel to the Hoon kernel. The NockAppHandle.poke() method handles this process and returns a PokeResult (Ack or Nack) to indicate success or failure."
+    },
+    {
+      "title": "Effect Handling",
+      "description": "After processing a poke, the Hoon kernel can produce 'effects' (responses) which are sent back to the Rust side. Effects are captured using the 'next_effect()' method on the NockAppHandle. Each effect is a noun structure that Rust code must interpret according to the protocol defined for that specific interaction. Effects allow asynchronous responses and callbacks from Hoon to Rust."
+    },
+    {
+      "title": "Noun Data Structure",
+      "description": "All data exchanged between Rust and Hoon must be encoded in the 'noun' format - a binary tree structure fundamental to Nock and Hoon. In Rust, the NounSlab class provides memory management for nouns. Common operations include creating atoms (D), cells (T), and using the tas! macro for converting strings to Nock atoms. Proper encoding and decoding of data between Rust and Hoon is critical for correct operation."
+    },
+    {
+      "title": "Driver Architecture",
+      "description": "The system uses an IO driver architecture where each external interface (mining, libp2p, http, file, etc.) is implemented as a driver that communicates with the Hoon kernel. Each driver has its own Wire implementation for identifying its messages. Drivers are registered with the NockApp and run asynchronously, using tokio for concurrency management. The IODriverFn type defines the interface all drivers must implement."
+    },
+    {
+      "title": "Example Interaction Flow",
+      "description": "A typical interaction flow (using mining as an example): 1) The mining driver creates a candidate block; 2) It pokes the Hoon kernel with a MiningWire::Candidate wire; 3) The Hoon kernel processes the candidate in the ++poke arm of the appropriate app; 4) If mining succeeds, an effect with [%command %pow ...] is returned; 5) The mining driver receives this effect and processes it accordingly."
+    },
+    {
+      "title": "Error Handling Protocol",
+      "description": "Error handling follows a specific protocol: 1) For synchronous errors, the poke returns a PokeResult::Nack; 2) For asynchronous errors, an appropriate error effect is returned; 3) Drivers must handle both synchronous and asynchronous errors gracefully; 4) Structured logging via tracing macros should be used for error reporting and debugging."
+    },
+    {
+      "title": "Cross-Language Documentation Standards",
+      "description": "When implementing cross-language interactions, both sides must be documented: 1) Rust side should document the Wire implementation, data structure, and expected effects; 2) Hoon side should document the ++poke arm that handles the wire, the data format expected, and the effects it produces; 3) Any changes to the protocol must be updated on both sides simultaneously to maintain compatibility."
+    },
+    {
+      "title": "Testing Cross-Language Interactions",
+      "description": "Testing cross-language interactions requires special attention: 1) Unit tests for Rust components should mock the Hoon kernel responses; 2) Integration tests should verify end-to-end behavior using real Hoon code; 3) Test fixtures should include sample nouns for both directions of communication; 4) Version mismatches and protocol errors should be explicitly tested."
+    },
+    {
+      "title": "Mining System Architecture",
+      "description": "The Nockchain mining system is split between Rust and Hoon components. Rust manages driver creation, mining scheduling, and key management (crates/nockchain/src/mining.rs), while Hoon implements the core mining algorithm (hoon/apps/dumbnet/miner.hoon and hoon/common/pow.hoon). This hybrid architecture leverages Rust's concurrency for orchestration and Hoon's deterministic execution for consensus-critical operations."
+    },
+    {
+      "title": "Mining Initialization Flow",
+      "description": "The mining initialization follows a specific sequence: 1) NockchainCli processes command-line mining parameters; 2) create_mining_driver creates an IODriverFn with mining configuration; 3) If mining keys are provided, set_mining_key or set_mining_key_advanced is called; 4) enable_mining activates or deactivates mining based on the --mine flag; 5) The mining driver is registered with NockApp via add_io_driver."
+    },
+    {
+      "title": "Mining Communication Protocol",
+      "description": "Mining communication follows the poke/wire protocol: 1) Rust sends candidate blocks using MiningWire::Candidate; 2) Hoon processes candidates in the ++poke arm of miner.hoon; 3) Successful mining results in [%command %pow proof hash block-commitment nonce] effects; 4) Rust captures these effects and sends them to the blockchain core using MiningWire::Mined; 5) All data structures must be properly encoded as nouns for cross-language compatibility."
+    },
+    {
+      "title": "Mining Algorithm Implementation",
+      "description": "The proof-of-work algorithm is implemented in hoon/common/pow.hoon with ++prove-block-inner as the main entry point. It combines block data, mining difficulty, and nonce values to search for valid hashes. The algorithm requires finding a hash that meets the network's difficulty target. Implementation must be consistent between Hoon and Rust components to maintain consensus."
+    },
+    {
+      "title": "Concurrent Mining Strategy",
+      "description": "Nockchain uses a concurrent mining strategy where: 1) Multiple mining attempts run simultaneously in separate tasks; 2) New candidate blocks are either immediately processed or queued if current attempts are active; 3) Each mining attempt runs in a separate tokio task with its own kernel instance; 4) This approach maximizes CPU utilization while providing clean separation between attempts; 5) When a valid solution is found, the result is communicated back to the main mining driver."
+    },
+    {
+      "title": "Mining Key Management",
+      "description": "Mining rewards are managed through configurable key settings: 1) Single key mining uses a simple pubkey configuration; 2) Advanced configurations support multi-signature schemes with share and threshold parameters (format: share,m:key1,key2,...); 3) The MiningKeyConfig struct parses and validates these configurations; 4) Keys must be valid base58-encoded strings; 5) Advanced configurations allow mining rewards to be split among multiple participants."
+    },
+    {
+      "title": "Mining Driver Lifecycle",
+      "description": "The mining driver follows a lifecycle pattern: 1) Initialization phase sets keys and enables mining; 2) Main loop phase processes candidate blocks and effects; 3) Mining attempts are spawned as needed; 4) Results processing phase handles mining successes; 5) The driver continues running until the application terminates; 6) Proper shutdown handling ensures mining attempts are terminated cleanly."
+    },
+    {
+      "title": "Mining Performance Considerations",
+      "description": "Mining performance optimization requires careful resource management: 1) Each mining attempt creates a temporary directory for kernel state; 2) Kernels use hot state from zkvm_jetpack for faster proving; 3) Memory utilization must be monitored as multiple concurrent mining attempts can consume significant resources; 4) The number of concurrent attempts should be tuned based on available CPU cores and memory; 5) Performance metrics should be collected to optimize resource allocation."
+    },
+    {
+      "title": "Hoon Mining Core Rules",
+      "description": "Hoon mining core implementation must follow these rules: 1) The miner.hoon app must implement a poke arm that handles candidate blocks; 2) prove-block-inner must be deterministic across all nodes for consensus; 3) The proof generation process must use standardized STARK proving techniques from common/stark/prover; 4) Effects returned must use the %command %pow tag format; 5) Block commitment and nonce values must be preserved from the input and included in the output effect."
+    },
+    {
+      "title": "Rust Mining Driver Rules",
+      "description": "Rust mining driver implementation must follow these rules: 1) All mining configuration must be parsed and validated before use; 2) Mining attempts must be isolated to prevent cross-contamination; 3) Effect handling must verify the format and content before processing; 4) Error handling must be robust with appropriate logging; 5) Network effects from successful mining must be propagated quickly to maintain chain efficiency; 6) Memory and resource management must prevent leaks even during high mining activity."
+    },
+    {
+      "title": "构建流程顺序",
+      "description": "Nockchain 的构建流程必须严格按照以下顺序执行:1) make install-hoonc:安装 Hoon 编译器 hoonc,确保后续构建依赖可用;2) make build:编译 Nockchain 及钱包等所有二进制文件和资源;3) make install-nockchain-wallet:安装编译好的钱包程序到 $HOME/.cargo/bin 目录;4) make install-nockchain:安装编译好的 Nockchain 节点程序到 $HOME/.cargo/bin 目录。"
+    },
+    {
+      "title": "构建步骤依赖关系",
+      "description": "make install-hoonc 必须在 make build 之前执行(如果项目依赖 hoonc);make build 必须在 make install-nockchain-wallet 和 make install-nockchain 之前执行;make install-nockchain-wallet 和 make install-nockchain 的顺序可以互换,但都必须在 make build 之后。"
+    },
+    {
+      "title": "构建命令说明",
+      "description": "make install-hoonc:安装 Hoon 编译器 hoonc;make build:编译 Nockchain 及钱包等所有二进制文件和资源;make install-nockchain-wallet:安装钱包程序到 $HOME/.cargo/bin 目录;make install-nockchain:安装 Nockchain 节点程序到 $HOME/.cargo/bin 目录。"
+    },
+    {
+      "title": "构建环境要求",
+      "description": "在执行构建命令前,需确保已安装 rustup、clang、llvm-dev、libclang-dev 等依赖,并正确配置环境变量(如 PATH)。"
+    },
+     {
+      "title": "Nockchain 节点挖矿启动参数规范",
+      "description": "启动 Nockchain 节点进行挖矿时,推荐使用如下参数配置:1) --mining-pubkey <公钥>:指定用于挖矿奖励的公钥;2) --mine:启用挖矿模式;3) --npc-socket nockchain.sock:指定与 Hoon 内核通信的 socket 文件;4) --bind /ip4/0.0.0.0/udp/3009/quic-v1:指定本地监听的 P2P 网络地址和端口;5) --peer <peer-address>:添加一个或多个 P2P 网络的种子节点;6) --no-default-peers:禁用默认内置的 peer 列表,仅使用手动指定的 peer;7) > nockchain.log 2>&1 &:将日志输出重定向到 nockchain.log 文件,并在后台运行。"
+    },
+    {
+      "title": "参数示例",
+      "description": "--mining-pubkey 31pK6dSu7PsQd6H8HKQo79iMXCvBTt4oDR12X7WbE4MsFLCa6YchVDP2B9Wu4rxGn5vNpsA5sppeJizi2UfS117iGERgr2eQELsQJYzChn52oVLTcHvd4sKUiK6GT4QrQG8S \\ --mine \\ --npc-socket nockchain.sock \\ --bind /ip4/0.0.0.0/udp/3009/quic-v1 \\ --peer /ip4/38.147.105.46/udp/3006/quic-v1 \\ --peer /ip4/151.80.18.102/udp/3006/quic-v1 \\ --peer /ip4/74.81.33.41/udp/3006/quic-v1 \\ --no-default-peers \\ > nockchain.log 2>&1 &"
+    },
+    {
+      "title": "参数说明",
+      "description": "1) --mining-pubkey:设置挖矿奖励公钥,必须为有效的 base58 编码字符串;2) --mine:开启挖矿功能;3) --npc-socket:指定与 Hoon 内核通信的本地 socket 文件路径;4) --bind:设置本地节点监听的网络地址和端口,建议使用 quic-v1 协议;5) --peer:可多次指定,添加外部已知节点以加速网络发现和同步;6) --no-default-peers:只连接手动指定的 peer,提升网络可控性和安全性;7) 日志重定向和后台运行,便于长期监控和排查。"
+    },
+     {
+      "title": "poke/wire 跨语言通信机制",
+      "description": "Nockchain 项目采用 poke/wire 机制实现 Rust 与 Hoon 之间的双向通信。Rust 侧通过实现 Wire trait,将指令和数据编码为 noun 结构,并通过 poke 方法发送给 Hoon 内核。Hoon 侧在 ++poke arm 处理收到的指令,并可通过 effect 机制异步返回结果。所有跨语言数据必须严格遵循 noun 格式和协议规范,确保类型和标签一致。"
+    },
+    {
+      "title": "poke 操作流程",
+      "description": "1) Rust 侧构造 NounSlab 数据结构,封装指令和参数;2) 通过 NockAppHandle.poke() 方法,将数据以指定 wire 类型发送给 Hoon 内核;3) Hoon 侧在对应 app 的 ++poke arm 解析 wire 和 noun 数据,执行相应逻辑;4) 处理完成后,Hoon 可通过 effect 机制将结果异步返回 Rust 侧。"
+    },
+    {
+      "title": "wire 协议规范",
+      "description": "每个需要与 Hoon 通信的 Rust 模块都必须实现 Wire trait,定义 SOURCE、VERSION 及 to_wire() 方法,生成 WireRepr 结构体。WireRepr 包含源标识、版本号和可选标签,确保通信协议的可扩展性和兼容性。"
+    },
+    {
+      "title": "数据格式要求",
+      "description": "所有 poke/wire 交互的数据必须以 noun(二叉树)格式编码。Rust 侧通过 NounSlab、Atom、Cell 等类型构造数据,Hoon 侧通过 ++poke arm 解析。双方需保持协议和数据结构同步,避免兼容性问题。"
+    }
+  ]
+}

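The Wire Protocol and Poke Operation Flow rules above describe the shape of each driver's wire. A self-contained Rust sketch of that shape is shown below; SOURCE, VERSION, to_wire(), and WireRepr follow the rule text, but the stand-in types here are illustrative rather than the actual nockapp definitions.

```rust
// Self-contained sketch of the Wire pattern described above; field and method
// names follow the rule text, the exact nockapp trait signature is assumed.
#[derive(Debug)]
struct WireRepr {
    source: &'static str,
    version: u64,
    tags: Vec<String>,
}

trait Wire {
    const SOURCE: &'static str;
    const VERSION: u64;

    fn to_wire(&self) -> WireRepr;
}

enum MiningWire {
    Mined,
    Candidate,
}

impl Wire for MiningWire {
    const SOURCE: &'static str = "miner";
    const VERSION: u64 = 1;

    fn to_wire(&self) -> WireRepr {
        // The tag distinguishes candidate pokes from mined-result pokes.
        let tag = match self {
            MiningWire::Mined => "mined",
            MiningWire::Candidate => "candidate",
        };
        WireRepr {
            source: Self::SOURCE,
            version: Self::VERSION,
            tags: vec![tag.to_string()],
        }
    }
}

fn main() {
    // A driver would attach this wire to a poke so the Hoon kernel can route it.
    println!("{:?}", MiningWire::Candidate.to_wire());
}
```
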
+ 4 - 0
.cursorrules

@@ -0,0 +1,4 @@
+{
+  "title": "Code Optimization Standards",
+  "description": "All code optimizations and refactoring must strictly follow the current project's language conventions and style guides. Ensure that all changes compile successfully and do not introduce new compilation errors."
+} 

+ 3 - 0
.vscode/settings.json

@@ -0,0 +1,3 @@
+{
+    "makefile.configureOnOpen": true
+}

+ 1 - 0
Cargo.lock

@@ -3513,6 +3513,7 @@ dependencies = [
  "nockchain-libp2p-io",
  "nockvm",
  "nockvm_macros",
+ "num_cpus",
  "tempfile",
  "termcolor",
  "tokio",

+ 1 - 0
crates/nockchain/Cargo.toml

@@ -31,6 +31,7 @@ libp2p = { workspace = true, features = [
     "cbor",
 ] }
 nockchain-libp2p-io.workspace = true
+num_cpus = "1.16.0"
 tempfile = { workspace = true }
 termcolor.workspace = true
 tokio = { workspace = true, features = ["full"] }

+ 20 - 13
crates/nockchain/src/lib.rs

@@ -8,10 +8,9 @@ use clap::{arg, command, ArgAction, Parser};
 use libp2p::identity::Keypair;
 use libp2p::multiaddr::Multiaddr;
 use libp2p::{allow_block_list, connection_limits, memory_connection_limits, PeerId};
-use nockapp::driver::Operation;
 use nockapp::kernel::boot;
 use nockapp::wire::Wire;
-use nockapp::{one_punch_driver, NockApp, NounExt};
+use nockapp::NockApp;
 use nockchain_bitcoin_sync::{bitcoin_watcher_driver, BitcoinRPCConnection, GenesisNodeType};
 use nockchain_libp2p_io::p2p::{
     MAX_ESTABLISHED_CONNECTIONS, MAX_ESTABLISHED_CONNECTIONS_PER_PEER,
@@ -191,6 +190,11 @@ pub struct NockchainCli {
         value_delimiter = ',',
     )]
     pub mining_key_adv: Option<Vec<MiningKeyConfig>>,
+    #[arg(
+        long,
+        help = "Maximum concurrent mining attempts (default: number of CPU cores)"
+    )]
+    pub max_concurrent_mining_attempts: Option<usize>,
     #[arg(long, help = "Watch for genesis block", default_value = "false")]
     pub genesis_watcher: bool,
     #[arg(long, help = "Mine genesis block", default_value = "false")]
@@ -278,6 +282,15 @@ impl NockchainCli {
             }
         }
 
+        // Validate max_concurrent_mining_attempts range
+        if let Some(max_concurrent) = self.max_concurrent_mining_attempts {
+            if max_concurrent == 0 || max_concurrent > 256 {
+                return Err(
+                    "max_concurrent_mining_attempts must be between 1 and 256".to_string(),
+                );
+            }
+        }
+
         Ok(())
     }
 
@@ -556,17 +569,11 @@ pub async fn init_with_kernel(
     }
 
     let mining_config = cli.as_ref().and_then(|c| {
-        if let Some(pubkey) = &c.mining_pubkey {
-            Some(vec![MiningKeyConfig {
-                share: 1,
-                m: 1,
-                keys: vec![pubkey.clone()],
-            }])
-        } else if let Some(mining_key_adv) = &c.mining_key_adv {
-            Some(mining_key_adv.clone())
-        } else {
-            None
-        }
+        crate::mining::MiningConfig::from_cli(
+            c.mining_pubkey.clone(),
+            c.mining_key_adv.clone(),
+            c.max_concurrent_mining_attempts,
+        )
     });
 
     let mine = cli.as_ref().map_or(false, |c| c.mine);

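The hunk above adds the --max-concurrent-mining-attempts flag and rejects values outside 1..=256. A hedged sketch of that check as a standalone helper with a few assertions (the helper name is illustrative, not part of this commit):

```rust
// Sketch of the range check added above, factored into a standalone helper so
// the accepted and rejected values can be exercised directly.
fn validate_max_concurrent(value: Option<usize>) -> Result<(), String> {
    if let Some(max_concurrent) = value {
        if max_concurrent == 0 || max_concurrent > 256 {
            return Err(
                "max_concurrent_mining_attempts must be between 1 and 256".to_string(),
            );
        }
    }
    Ok(())
}

fn main() {
    // Unset and in-range values pass; zero and oversized values fail.
    assert!(validate_max_concurrent(None).is_ok());
    assert!(validate_max_concurrent(Some(1)).is_ok());
    assert!(validate_max_concurrent(Some(256)).is_ok());
    assert!(validate_max_concurrent(Some(0)).is_err());
    assert!(validate_max_concurrent(Some(257)).is_err());
    println!("validation checks passed");
}
```
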
+ 283 - 44
crates/nockchain/src/mining.rs

@@ -1,4 +1,6 @@
 use std::str::FromStr;
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::Arc;
 
 use kernels::miner::KERNEL;
 use nockapp::kernel::checkpoint::JamPaths;
@@ -11,7 +13,25 @@ use nockapp::noun::{AtomExt, NounExt};
 use nockvm::noun::{Atom, D, T};
 use nockvm_macros::tas;
 use tempfile::tempdir;
-use tracing::{instrument, warn};
+use tracing::{debug, info, instrument, warn};
+
+/// Goldilocks prime: p = 2^64 - 2^32 + 1
+const GOLDILOCKS_PRIME: u64 = 18446744069414584321;
+
+/// Convert an atom to tip5 digest format [a b c d e]
+/// This mirrors the Hoon function atom-to-digest:tip5:zeke
+fn atom_to_digest(nonce_slab: &mut NounSlab, buffer: u64) -> nockvm::noun::Noun {
+    let p = GOLDILOCKS_PRIME;
+    
+    let (q1, a) = (buffer / p, buffer % p);
+    let (q2, b) = (q1 / p, q1 % p);
+    let (q3, c) = (q2 / p, q2 % p);
+    let (e, d) = (q3 / p, q3 % p);
+    
+    debug!("Converting nonce {} to digest: [{} {} {} {} {}]", buffer, a, b, c, d, e);
+    
+    T(nonce_slab, &[D(a), D(b), D(c), D(d), D(e)])
+}
 
 pub enum MiningWire {
     Mined,
@@ -46,39 +66,97 @@ pub struct MiningKeyConfig {
     pub share: u64,
     pub m: u64,
     pub keys: Vec<String>,
+    /// Maximum number of concurrent mining attempts (default: num_cpus::get())
+    pub max_concurrent_attempts: Option<usize>,
 }
 
 impl FromStr for MiningKeyConfig {
     type Err = String;
 
     fn from_str(s: &str) -> Result<Self, Self::Err> {
-        // Expected format: "share,m:key1,key2,key3"
+        // Expected format: "share,m:key1,key2,key3" or "share,m,max_concurrent:key1,key2,key3"
         let parts: Vec<&str> = s.split(':').collect();
         if parts.len() != 2 {
-            return Err("Invalid format. Expected 'share,m:key1,key2,key3'".to_string());
+            return Err("Invalid format. Expected 'share,m:key1,key2,key3' or 'share,m,max_concurrent:key1,key2,key3'".to_string());
         }
 
         let share_m: Vec<&str> = parts[0].split(',').collect();
-        if share_m.len() != 2 {
-            return Err("Invalid share,m format".to_string());
+        if share_m.len() < 2 || share_m.len() > 3 {
+            return Err("Invalid share,m format. Expected 'share,m' or 'share,m,max_concurrent'".to_string());
         }
 
         let share = share_m[0].parse::<u64>().map_err(|e| e.to_string())?;
         let m = share_m[1].parse::<u64>().map_err(|e| e.to_string())?;
+        
+        let max_concurrent_attempts = if share_m.len() == 3 {
+            Some(share_m[2].parse::<usize>().map_err(|e| format!("Invalid max_concurrent value: {}", e))?)
+        } else {
+            None
+        };
+        
         let keys: Vec<String> = parts[1].split(',').map(String::from).collect();
 
-        Ok(MiningKeyConfig { share, m, keys })
+        Ok(MiningKeyConfig { 
+            share, 
+            m, 
+            keys, 
+            max_concurrent_attempts,
+        })
+    }
+}
+
+/// Mining configuration extracted from CLI arguments
+#[derive(Debug, Clone)]
+pub struct MiningConfig {
+    pub key_configs: Vec<MiningKeyConfig>,
+    pub max_concurrent_attempts: usize,
+}
+
+impl MiningConfig {
+    pub fn from_cli(
+        mining_pubkey: Option<String>,
+        mining_key_adv: Option<Vec<MiningKeyConfig>>,
+        max_concurrent_attempts: Option<usize>,
+    ) -> Option<Self> {
+        let key_configs = if let Some(pubkey) = mining_pubkey {
+            vec![MiningKeyConfig {
+                share: 1,
+                m: 1,
+                keys: vec![pubkey],
+                max_concurrent_attempts: None,
+            }]
+        } else if let Some(configs) = mining_key_adv {
+            configs
+        } else {
+            return None;
+        };
+
+        // Determine the maximum concurrent attempts
+        let max_concurrent = max_concurrent_attempts
+            .or_else(|| {
+                // Use the first config's max_concurrent_attempts if specified
+                key_configs.first()?.max_concurrent_attempts
+            })
+            .unwrap_or_else(|| {
+                // Default to number of logical CPUs
+                num_cpus::get()
+            });
+
+        Some(MiningConfig {
+            key_configs,
+            max_concurrent_attempts: max_concurrent,
+        })
     }
 }
 
 pub fn create_mining_driver(
-    mining_config: Option<Vec<MiningKeyConfig>>,
+    mining_config: Option<MiningConfig>,
     mine: bool,
     init_complete_tx: Option<tokio::sync::oneshot::Sender<()>>,
 ) -> IODriverFn {
     Box::new(move |mut handle| {
         Box::pin(async move {
-            let Some(configs) = mining_config else {
+            let Some(config) = mining_config else {
                 enable_mining(&handle, false).await?;
 
                 if let Some(tx) = init_complete_tx {
@@ -90,14 +168,16 @@ pub fn create_mining_driver(
 
                 return Ok(());
             };
-            if configs.len() == 1
-                && configs[0].share == 1
-                && configs[0].m == 1
-                && configs[0].keys.len() == 1
+            
+            // Set up mining keys
+            if config.key_configs.len() == 1
+                && config.key_configs[0].share == 1
+                && config.key_configs[0].m == 1
+                && config.key_configs[0].keys.len() == 1
             {
-                set_mining_key(&handle, configs[0].keys[0].clone()).await?;
+                set_mining_key(&handle, config.key_configs[0].keys[0].clone()).await?;
             } else {
-                set_mining_key_advanced(&handle, configs).await?;
+                set_mining_key_advanced(&handle, config.key_configs).await?;
             }
             enable_mining(&handle, mine).await?;
 
@@ -111,8 +191,14 @@ pub fn create_mining_driver(
             if !mine {
                 return Ok(());
             }
-            let mut next_attempt: Option<NounSlab> = None;
-            let mut current_attempt: tokio::task::JoinSet<()> = tokio::task::JoinSet::new();
+
+            info!("Starting mining driver with {} max concurrent attempts", config.max_concurrent_attempts);
+            
+            // Track active mining attempts
+            let active_attempts = Arc::new(AtomicUsize::new(0));
+            let mut current_attempts: tokio::task::JoinSet<()> = tokio::task::JoinSet::new();
+            let mut candidate_queue: Vec<(NounSlab, u64)> = Vec::new();
+            let base_nonce = Arc::new(AtomicUsize::new(0));
 
             loop {
                 tokio::select! {
@@ -132,27 +218,57 @@ pub fn create_mining_driver(
                                 slab.copy_into(effect_cell.tail());
                                 slab
                             };
-                            if !current_attempt.is_empty() {
-                                next_attempt = Some(candidate_slab);
-                            } else {
+                            
+                            info!("Received new mining candidate, preparing {} concurrent attempts", 
+                                  config.max_concurrent_attempts);
+                            
+                            base_nonce.store(0, Ordering::Relaxed);
+                            
+                            current_attempts.abort_all();
+                            candidate_queue.clear();
+                            active_attempts.store(0, Ordering::Relaxed);
+                            
+                            let nonce_range_size = 10000;
+                            
+                            for thread_id in 0..config.max_concurrent_attempts {
                                 let (cur_handle, attempt_handle) = handle.dup();
                                 handle = cur_handle;
-                                current_attempt.spawn(mining_attempt(candidate_slab, attempt_handle));
+                                let attempt_counter = Arc::clone(&active_attempts);
+                                let _nonce_counter = Arc::clone(&base_nonce);
+                                attempt_counter.fetch_add(1, Ordering::Relaxed);
+                                
+                                let candidate_copy = {
+                                    let mut slab = NounSlab::new();
+                                    slab.copy_into(unsafe { *candidate_slab.root() });
+                                    slab
+                                };
+                                
+                                let nonce_start = thread_id * nonce_range_size;
+                                
+                                info!("Starting mining thread {} with nonce range {}-{}", 
+                                      thread_id + 1, 
+                                      nonce_start, 
+                                      nonce_start + nonce_range_size - 1);
+                                
+                                current_attempts.spawn(mining_attempt_with_nonce_range(
+                                    candidate_copy,
+                                    attempt_handle,
+                                    attempt_counter,
+                                    nonce_start as u64,
+                                    nonce_range_size as u64,
+                                    thread_id,
+                                ));
                             }
                         }
                     },
-                    mining_attempt_res = current_attempt.join_next(), if !current_attempt.is_empty()  => {
+                    mining_attempt_res = current_attempts.join_next(), if !current_attempts.is_empty() => {
                         if let Some(Err(e)) = mining_attempt_res {
                             warn!("Error during mining attempt: {e:?}");
                         }
-                        let Some(candidate_slab) = next_attempt else {
-                            continue;
-                        };
-                        next_attempt = None;
-                        let (cur_handle, attempt_handle) = handle.dup();
-                        handle = cur_handle;
-                        current_attempt.spawn(mining_attempt(candidate_slab, attempt_handle));
-
+                        
+                        debug!("Mining attempt completed, active attempts: {}/{}", 
+                               active_attempts.load(Ordering::Relaxed), 
+                               config.max_concurrent_attempts);
                     }
                 }
             }
@@ -160,35 +276,158 @@ pub fn create_mining_driver(
     })
 }
 
-pub async fn mining_attempt(candidate: NounSlab, handle: NockAppHandle) -> () {
+async fn mining_attempt_with_nonce_range(
+    candidate: NounSlab, 
+    handle: NockAppHandle, 
+    attempt_counter: Arc<AtomicUsize>,
+    nonce_start: u64,
+    nonce_range_size: u64,
+    thread_id: usize,
+) -> () {
+    // Ensure we decrement the counter when this attempt finishes
+    let _guard = CounterGuard::new(Arc::clone(&attempt_counter));
+    
+    debug!("Mining attempt starting");
+    mining_attempt_with_nonce(candidate, handle, nonce_start, nonce_range_size, thread_id).await;
+    debug!("Mining attempt finished");
+}
+
+/// RAII guard to ensure attempt counter is properly decremented
+struct CounterGuard {
+    counter: Arc<AtomicUsize>,
+}
+
+impl CounterGuard {
+    fn new(counter: Arc<AtomicUsize>) -> Self {
+        Self { counter }
+    }
+}
+
+impl Drop for CounterGuard {
+    fn drop(&mut self) {
+        let prev = self.counter.fetch_sub(1, Ordering::Relaxed);
+        debug!("Mining attempt counter decremented: {} -> {}", prev, prev - 1);
+    }
+}
+
+pub async fn mining_attempt_with_nonce(
+    candidate: NounSlab, 
+    handle: NockAppHandle, 
+    nonce_start: u64,
+    nonce_range_size: u64,
+    thread_id: usize,
+) -> () {
+    let attempt_start = std::time::Instant::now();
+    debug!("Thread {}: Creating temporary directory for mining attempt with nonce range {}-{}", 
+           thread_id, nonce_start, nonce_start + nonce_range_size - 1);
+    
     let snapshot_dir =
         tokio::task::spawn_blocking(|| tempdir().expect("Failed to create temporary directory"))
             .await
             .expect("Failed to create temporary directory");
+    
+    debug!("Thread {}: Producing prover hot state", thread_id);
     let hot_state = zkvm_jetpack::hot::produce_prover_hot_state();
     let snapshot_path_buf = snapshot_dir.path().to_path_buf();
     let jam_paths = JamPaths::new(snapshot_dir.path());
-    // Spawns a new std::thread for this mining attempt
+    
+    debug!("Thread {}: Loading mining kernel with hot state", thread_id);
+    let kernel_start = std::time::Instant::now();
     let kernel =
         Kernel::load_with_hot_state_huge(snapshot_path_buf, jam_paths, KERNEL, &hot_state, false)
             .await
             .expect("Could not load mining kernel");
-    let effects_slab = kernel
-        .poke(MiningWire::Candidate.to_wire(), candidate)
-        .await
-        .expect("Could not poke mining kernel with candidate");
-    for effect in effects_slab.to_vec() {
-        let Ok(effect_cell) = (unsafe { effect.root().as_cell() }) else {
-            drop(effect);
-            continue;
+    
+    debug!("Thread {}: Kernel loaded in {:?}", thread_id, kernel_start.elapsed());
+    
+    // Try each nonce in the assigned range, one at a time
+    for nonce_offset in 0..nonce_range_size {
+        let current_nonce = nonce_start + nonce_offset;
+        
+        debug!("Thread {}: Attempting nonce {} ({}/{} in range)", 
+               thread_id, current_nonce, nonce_offset + 1, nonce_range_size);
+        
+        // Build a candidate data structure containing the current nonce
+        let nonce_candidate = {
+            let mut nonce_slab = NounSlab::new();
+            
+            // Debug: print the structure of the original candidate
+            debug!("Thread {}: Original candidate structure: {:?}", 
+                   thread_id, unsafe { candidate.root() });
+            
+            // Parse the original candidate: [length block-commitment original-nonce]
+            let original_cell = unsafe { candidate.root().as_cell() }
+                .expect("Candidate should be a cell");
+            
+            debug!("Thread {}: Original cell head: {:?}, tail: {:?}", 
+                   thread_id, original_cell.head(), original_cell.tail());
+            
+            let length = original_cell.head();
+            let tail = original_cell.tail();
+            
+            // The tail should be [block-commitment original-nonce]
+            let tail_cell = tail.as_cell()
+                .expect("Candidate tail should be a cell containing [block-commitment nonce]");
+            
+            debug!("Thread {}: Tail cell head (block-commitment): {:?}, tail (original-nonce): {:?}", 
+                   thread_id, tail_cell.head(), tail_cell.tail());
+            
+            let block_commitment = tail_cell.head();
+            // Note: ignore the original nonce (tail_cell.tail()) and use current_nonce instead
+            
+            // Convert the current nonce into tip5 digest format
+            let nonce_digest = atom_to_digest(&mut nonce_slab, current_nonce);
+            
+            // 构建符合miner.hoon期望的候选结构: [length block-commitment nonce]
+            let nonce_candidate = T(
+                &mut nonce_slab,
+                &[
+                    length,                    // length
+                    block_commitment,          // block-commitment  
+                    nonce_digest,             // nonce (tip5 digest format)
+                ],
+            );
+            
+            debug!("Thread {}: Built new candidate with nonce {} replacing original nonce", 
+                   thread_id, current_nonce);
+            
+            nonce_slab.set_root(nonce_candidate);
+            nonce_slab
         };
-        if effect_cell.head().eq_bytes("command") {
-            handle
-                .poke(MiningWire::Mined.to_wire(), effect)
-                .await
-                .expect("Could not poke nockchain with mined PoW");
+        
+        debug!("Thread {}: Poking mining kernel with nonce {}", thread_id, current_nonce);
+        let poke_start = std::time::Instant::now();
+        
+        let effects_slab = kernel
+            .poke(MiningWire::Candidate.to_wire(), nonce_candidate)
+            .await
+            .expect("Could not poke mining kernel");
+        
+        debug!("Thread {}: Mining poke completed in {:?} for nonce {}", 
+               thread_id, poke_start.elapsed(), current_nonce);
+        
+        // Check whether a valid proof was found
+        for effect in effects_slab.to_vec() {
+            let Ok(effect_cell) = (unsafe { effect.root().as_cell() }) else {
+                drop(effect);
+                continue;
+            };
+            if effect_cell.head().eq_bytes("command") {
+                info!("Thread {}: Mining SUCCESS! Found valid proof with nonce {} in {:?}", 
+                      thread_id, current_nonce, attempt_start.elapsed());
+                handle
+                    .poke(MiningWire::Mined.to_wire(), effect)
+                    .await
+                    .expect("Could not poke nockchain with mined PoW");
+                return;
+            }
         }
+        
+        debug!("Thread {}: Nonce {} did not produce valid proof", thread_id, current_nonce);
     }
+    
+    debug!("Thread {}: Exhausted nonce range {}-{} without success in {:?}", 
+           thread_id, nonce_start, nonce_start + nonce_range_size - 1, attempt_start.elapsed());
 }
 
 #[instrument(skip(handle, pubkey))]

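atom_to_digest above mirrors atom-to-digest:tip5:zeke by splitting the nonce into five base-p digits, where p is the Goldilocks prime. A self-contained check of just that arithmetic (the helper name is illustrative; the commit's function additionally builds the noun with T and D):

```rust
/// Re-derivation of the base-p decomposition used by atom_to_digest, kept
/// separate from the noun-building code so the arithmetic can be checked on
/// its own. The [a b c d e] limb order matches the function in this commit.
const GOLDILOCKS_PRIME: u64 = 18446744069414584321; // 2^64 - 2^32 + 1

fn digest_limbs(buffer: u64) -> [u64; 5] {
    let p = GOLDILOCKS_PRIME;
    let (q1, a) = (buffer / p, buffer % p);
    let (q2, b) = (q1 / p, q1 % p);
    let (q3, c) = (q2 / p, q2 % p);
    let (e, d) = (q3 / p, q3 % p);
    [a, b, c, d, e]
}

fn main() {
    // Any nonce below the prime is a single limb: [nonce 0 0 0 0].
    assert_eq!(digest_limbs(42), [42, 0, 0, 0, 0]);

    // A u64 can exceed the prime by at most 2^32 - 2, so for u64 input the
    // second limb is at most 1 and the remaining limbs are always zero.
    assert_eq!(digest_limbs(GOLDILOCKS_PRIME + 5), [5, 1, 0, 0, 0]);
    println!("digest limb decomposition checks passed");
}
```
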
BIN
scripts/.DS_Store


+ 130 - 0
test_concurrent_mining.sh

@@ -0,0 +1,130 @@
+#!/bin/bash
+
+# Concurrent mining test script
+# Verifies the multi-core CPU optimization implementation
+
+set -e
+
+echo "=== Nockchain 并发挖矿测试 ==="
+
+# Check that the build has completed
+if [ ! -f "target/release/nockchain" ]; then
+    echo "错误: nockchain 二进制文件不存在,请先运行 'make build'"
+    exit 1
+fi
+
+# Get the number of CPU cores on this system
+CPU_CORES=$(sysctl -n hw.ncpu 2>/dev/null || nproc 2>/dev/null || echo "4")
+echo "检测到 CPU 核心数: $CPU_CORES"
+
+# Test different concurrency settings
+TEST_CASES=(
+    "1:单线程挖矿"
+    "2:双线程挖矿" 
+    "4:四线程挖矿"
+    "$CPU_CORES:全核心挖矿"
+    "$((CPU_CORES * 2)):超线程测试"
+)
+
+MINING_PUBKEY="31pK6dSu7PsQd6H8HKQo79iMXCvBTt4oDR12X7WbE4MsFLCa6YchVDP2B9Wu4rxGn5vNpsA5sppeJizi2UfS117iGERgr2eQELsQJYzChn52oVLTcHvd4sKUiK6GT4QrQG8S"
+
+# Create the test directory
+TEST_DIR="test_mining_$(date +%Y%m%d_%H%M%S)"
+mkdir -p "$TEST_DIR"
+cd "$TEST_DIR"
+
+echo "测试目录: $(pwd)"
+
+for test_case in "${TEST_CASES[@]}"; do
+    IFS=':' read -r threads description <<< "$test_case"
+    
+    echo ""
+    echo "=== 测试: $description (线程数: $threads) ==="
+    
+    # Create a subdirectory
+    test_subdir="test_${threads}_threads"
+    mkdir -p "$test_subdir"
+    cd "$test_subdir"
+    
+    # Set the log level so detailed output is visible
+    export RUST_LOG="info,nockchain=debug"
+    export RUST_BACKTRACE=1
+    
+    echo "开始挖矿测试,线程数: $threads"
+    echo "如果在30秒内没有成功,将自动停止测试"
+    
+    # Start the mining node with a timeout
+    timeout 30s ../../target/release/nockchain \
+        --npc-socket "nockchain_${threads}.sock" \
+        --mining-pubkey "$MINING_PUBKEY" \
+        --max-concurrent-mining-attempts "$threads" \
+        --mine \
+        --fakenet \
+        --genesis-leader \
+        --genesis-message "Concurrent mining test - ${threads} threads" \
+        2>&1 | tee "mining_${threads}.log" &
+    
+    mining_pid=$!
+    
+    # Wait for the mining process
+    sleep 5
+    
+    # Check whether the process is still running
+    if ps -p $mining_pid > /dev/null; then
+        echo "挖矿进程正在运行 (PID: $mining_pid)"
+        
+        # Check the log for signs of concurrent mining
+        if grep -q "Starting mining thread" "mining_${threads}.log"; then
+            echo "✅ Success: mining attempt start detected"
+        else
+            echo "⚠️  Warning: no mining attempt detected"
+        fi
+        
+        if grep -q "max concurrent attempts" "mining_${threads}.log"; then
+            echo "✅ Success: concurrency configuration detected"
+        else
+            echo "⚠️  Warning: concurrency configuration not detected"
+        fi
+        
+        # Stop the mining process
+        kill $mining_pid 2>/dev/null || true
+        wait $mining_pid 2>/dev/null || true
+    else
+        echo "❌ 失败: 挖矿进程提前退出"
+    fi
+    
+    # Analyze the log
+    echo "Analyzing log file..."
+    if [ -f "mining_${threads}.log" ]; then
+        echo "日志行数: $(wc -l < mining_${threads}.log)"
+        
+        # Check for errors
+        if grep -qi "error\|panic\|failed" "mining_${threads}.log"; then
+            grep -i "error\|panic\|failed" "mining_${threads}.log" | head -5
+            echo "❌ Errors found in log"
+        else
+            echo "✅ No errors found in log"
+        fi
+        
+        # Check performance metrics
+        if grep -q "active attempts: [0-9]*" "mining_${threads}.log"; then
+            grep -o "active attempts: [0-9]*" "mining_${threads}.log" | tail -5
+            echo "✅ Active mining attempt records found"
+        fi
+    fi
+    
+    cd ..
+    echo "测试 $description 完成"
+done
+
+cd ..
+
+echo ""
+echo "=== 测试总结 ==="
+echo "所有测试已完成。检查 $TEST_DIR 目录中的日志文件以获取详细信息。"
+echo ""
+echo "要手动测试挖矿,请运行:"
+echo "  ./target/release/nockchain \\"
+echo "    --mining-pubkey $MINING_PUBKEY \\"
+echo "    --max-concurrent-mining-attempts $CPU_CORES \\"
+echo "    --mine --fakenet --genesis-leader"
+echo ""
+echo "多核挖矿优化测试完成!"