diff --git a/.gitignore b/.gitignore
index d3bc52cfb8..89c07647f5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -270,6 +270,7 @@ modules/blogging/app/Volo.BloggingTestApp/Logs/*.*
modules/blogging/app/Volo.BloggingTestApp/wwwroot/files/*.*
modules/docs/app/VoloDocs.Web/Logs/*.*
modules/setting-management/app/Volo.Abp.SettingManagement.DemoApp/Logs/*.*
+modules/openiddict/app/OpenIddict.Demo.Server/wwwroot/libs/**
templates/module/app/MyCompanyName.MyProjectName.DemoApp/Logs/*.*
templates/module/aspnet-core/host/MyCompanyName.MyProjectName.Blazor.Server.Host/Logs/logs.txt
templates/mvc/src/MyCompanyName.MyProjectName.Web/Logs/*.*
diff --git a/Directory.Packages.props b/Directory.Packages.props
index 7034c62588..1114d54cb3 100644
--- a/Directory.Packages.props
+++ b/Directory.Packages.props
@@ -117,12 +117,12 @@
-
-
-
-
+
+
+
+
-
+
@@ -183,10 +183,10 @@
-
-
-
-
+
+
+
+
diff --git a/docs/en/Community-Articles/2026-03-10-Operation-Rate-Limiting-in-ABP-Framework/POST.md b/docs/en/Community-Articles/2026-03-10-Operation-Rate-Limiting-in-ABP-Framework/POST.md
index 91a9e26aff..d3a851247f 100644
--- a/docs/en/Community-Articles/2026-03-10-Operation-Rate-Limiting-in-ABP-Framework/POST.md
+++ b/docs/en/Community-Articles/2026-03-10-Operation-Rate-Limiting-in-ABP-Framework/POST.md
@@ -1,4 +1,4 @@
-# Operation Rate Limiting in ABP Framework
+# Operation Rate Limiting in ABP
Almost every user-facing system eventually runs into the same problem: **some operations cannot be allowed to run without limits**.
@@ -24,15 +24,9 @@ Real-world requirements tend to look like this:
The pattern is clear: the identity being throttled is a **business identity** — a user, a phone number, a resource ID — not an IP address. And the action being throttled is a **business operation**, not an HTTP request.
-ABP Framework's **Operation Rate Limiting** module is built for exactly this. It lets you enforce limits directly in your application or domain layer, with full awareness of who is doing what.
+ABP's **Operation Rate Limiting** module is built for exactly this. It lets you enforce limits directly in your application or domain layer, with full awareness of who is doing what.
-Add the package to your project:
-
-```bash
-abp add-package Volo.Abp.OperationRateLimiting
-```
-
-> Operation Rate Limiting is available starting from **ABP Framework 10.3**. See the [pull request](https://github.com/abpframework/abp/pull/25024) for details.
+This module is used by the Account (Pro) modules internally and comes pre-installed in the latest startup templates. You must have an [ABP Team or a higher license](https://abp.io/pricing) to use this module.
## Defining a Policy
@@ -66,7 +60,7 @@ public class SmsAppService : ApplicationService
_rateLimitChecker = rateLimitChecker;
}
- public async Task SendCodeAsync(string phoneNumber)
+ public virtual async Task SendCodeAsync(string phoneNumber)
{
await _rateLimitChecker.CheckAsync("SendSmsCode", phoneNumber);
@@ -77,6 +71,74 @@ public class SmsAppService : ApplicationService
`CheckAsync` checks the current usage against the limit and throws `AbpOperationRateLimitingException` (HTTP 429) if the limit is already exceeded. If the check passes, it then increments the counter and proceeds. ABP's exception pipeline catches this automatically and returns a standard error response. Put `CheckAsync` first — the rate limit check is the gate, and everything else only runs if it passes.
+## Declarative Usage with `[OperationRateLimiting]`
+
+The explicit `CheckAsync` approach is useful when you need fine-grained control — for example, when you want to check the limit conditionally, or when the parameter value comes from somewhere other than a method argument. But for the common case where you simply want to enforce a policy on every invocation of a specific method, there's a cleaner way: the `[OperationRateLimiting]` attribute.
+
+```csharp
+public class SmsAppService : ApplicationService
+{
+ [OperationRateLimiting("SendSmsCode")]
+ public virtual async Task SendCodeAsync([RateLimitingParameter] string phoneNumber)
+ {
+ // Rate limit is enforced automatically — no manual CheckAsync needed.
+ await _smsSender.SendAsync(phoneNumber, GenerateCode());
+ }
+}
+```
+
+The attribute works on both **Application Service methods** (via ABP's interceptor) and **MVC Controller actions** (via an action filter). No manual injection of `IOperationRateLimitingChecker` required.
+
+### Providing the Partition Key
+
+When using the attribute, the partition key is resolved from the method's parameters automatically:
+
+- Mark a parameter with `[RateLimitingParameter]` to use its `ToString()` value as the key — this is the most common case when the key is a single primitive like a phone number or email.
+- Have your input DTO implement `IHasOperationRateLimitingParameter` and provide a `GetPartitionParameter()` method — useful when the key is a property buried inside a complex input object.
+
+```csharp
+public class SendSmsCodeInput : IHasOperationRateLimitingParameter
+{
+ public string PhoneNumber { get; set; }
+ public string Language { get; set; }
+
+ public string? GetPartitionParameter() => PhoneNumber;
+}
+
+[OperationRateLimiting("SendSmsCode")]
+public virtual async Task SendCodeAsync(SendSmsCodeInput input)
+{
+ // input.GetPartitionParameter() = input.PhoneNumber is used as the partition key.
+}
+```
+
+If neither is provided, `Parameter` is `null` — which is perfectly valid for policies that use `PartitionByCurrentUser`, `PartitionByClientIp`, or similar partition types that don't rely on an explicit value.
+
+```csharp
+// Policy uses PartitionByCurrentUser — no partition key needed.
+[OperationRateLimiting("GenerateReport")]
+public virtual async Task GenerateMonthlyReportAsync()
+{
+ // Rate limit is checked per current user, automatically.
+}
+```
+
+> The resolution order is: `[RateLimitingParameter]` first, then `IHasOperationRateLimitingParameter`, then `null`. If the method has parameters but none is resolved, a warning is logged to help you catch the misconfiguration early.
+
+You can also place `[OperationRateLimiting]` on the class itself to apply the policy to all public methods:
+
+```csharp
+[OperationRateLimiting("MyServiceLimit")]
+public class MyAppService : ApplicationService
+{
+ public virtual async Task MethodAAsync([RateLimitingParameter] string key) { ... }
+
+ public virtual async Task MethodBAsync([RateLimitingParameter] string key) { ... }
+}
+```
+
+A method-level attribute always takes precedence over the class-level one.
+
## Choosing a Partition Type
The partition type controls **how counters are isolated from each other** — it's the most important decision when setting up a policy, because it determines *what dimension you're counting across*.
@@ -87,7 +149,7 @@ Getting this wrong can make your rate limiting completely ineffective. Using `Pa
- **`PartitionByCurrentUser`** — uses the authenticated user's ID, with no value to pass. Perfect for "each user gets N per day" scenarios where user identity is all you need.
- **`PartitionByClientIp`** — uses the client's IP address. Don't rely on this alone — it's too easy to rotate. Use it as a secondary layer alongside another partition type, as in the login example below.
- **`PartitionByEmail`** and **`PartitionByPhoneNumber`** — designed for pre-authentication flows where the user isn't logged in yet. They prefer the `Parameter` value you explicitly pass, and fall back to the current user's email or phone number if none is provided.
-- **`PartitionBy`** — a custom async delegate that can produce any partition key you need. When the built-in options don't fit, you're free to implement whatever logic makes sense: look up a resource's owner in the database, derive a key from the user's subscription tier, partition by tenant — anything that returns a string.
+- **`PartitionBy`** — a named custom resolver that can produce any partition key you need. Register a resolver function under a unique name via `options.AddPartitionKeyResolver("MyResolver", ctx => ...)`, then reference it by name: `.PartitionBy("MyResolver")`. You can also register and reference in one step: `.PartitionBy("MyResolver", ctx => ...)`. When the built-in options don't fit, you're free to implement whatever logic makes sense: look up a resource's owner in the database, derive a key from the user's subscription tier, partition by tenant — anything that returns a string. Because the resolver is stored by name (not as an anonymous delegate), it can be serialized and managed from a UI or database.
> The rule of thumb: partition by the identity of whoever's behavior you're trying to limit.
@@ -114,6 +176,70 @@ The two counters are completely independent. If `alice` fails 5 times, her accou
When multiple rules are present, the module uses a two-phase approach: it checks all rules first, and only increments counters if every rule passes. This prevents a rule from consuming quota on a request that would have been rejected by another rule anyway.
+## Customizing Policies from Reusable Modules
+
+ABP modules (including your own) can ship with built-in rate limiting policies. For example, an Account module might define a `"Account.SendPasswordResetCode"` policy with conservative defaults that make sense for most applications. When you need different rules in your specific application, you have two options.
+
+**Complete replacement with `AddPolicy`:** call `AddPolicy` with the same name and the second registration wins, replacing all rules from the module:
+
+```csharp
+Configure(options =>
+{
+ options.AddPolicy("Account.SendPasswordResetCode", policy =>
+ {
+ policy.AddRule(rule => rule
+ .WithFixedWindow(TimeSpan.FromMinutes(5), maxCount: 3)
+ .PartitionByEmail());
+ });
+});
+```
+
+**Partial modification with `ConfigurePolicy`:** when you only want to tweak part of a policy — change the error code, add a secondary rule, or tighten the window — use `ConfigurePolicy`. The builder starts pre-populated with the module's existing rules, so you only express what changes.
+
+For example, keep the module's default rules but assign your own localized error code:
+
+```csharp
+Configure(options =>
+{
+ options.ConfigurePolicy("Account.SendPasswordResetCode", policy =>
+ {
+ policy.WithErrorCode("MyApp:PasswordResetLimit");
+ });
+});
+```
+
+Or add a secondary IP-based rule on top of what the module already defined, without touching it:
+
+```csharp
+Configure(options =>
+{
+ options.ConfigurePolicy("Account.SendPasswordResetCode", policy =>
+ {
+ policy.AddRule(rule => rule
+ .WithFixedWindow(TimeSpan.FromHours(1), maxCount: 20)
+ .PartitionByClientIp());
+ });
+});
+```
+
+If you want a clean slate, call `ClearRules()` first and then define entirely new rules — this gives you the same result as `AddPolicy` but makes the intent explicit:
+
+```csharp
+Configure(options =>
+{
+ options.ConfigurePolicy("Account.SendPasswordResetCode", policy =>
+ {
+ policy.ClearRules()
+ .WithFixedWindow(TimeSpan.FromMinutes(10), maxCount: 5)
+ .PartitionByEmail();
+ });
+});
+```
+
+`ConfigurePolicy` throws if the policy name doesn't exist — which catches typos at startup rather than silently doing nothing.
+
+The general rule: use `AddPolicy` for full replacements, `ConfigurePolicy` for surgical modifications.
+
## Beyond Just Checking
Not every scenario calls for throwing an exception. `IOperationRateLimitingChecker` provides three additional methods for more nuanced control.
@@ -179,10 +305,10 @@ public override void ConfigureServices(ServiceConfigurationContext context)
## Summary
-ABP's Operation Rate Limiting fills the gap that ASP.NET Core's HTTP middleware can't: rate limiting with real awareness of *who* is doing *what*. Define a named policy, pick a time window, a max count, and a partition type. Call `CheckAsync` wherever you need it. Counter storage, distributed locking, and exception handling are all taken care of.
+ABP's Operation Rate Limiting fills the gap that ASP.NET Core's HTTP middleware can't: rate limiting with real awareness of *who* is doing *what*. Define a named policy, pick a time window, a max count, and a partition type. Then either call `CheckAsync` explicitly, or just add `[OperationRateLimiting]` to your method and let the framework handle the rest. Counter storage, distributed locking, and exception handling are all taken care of.
## References
-- [Operation Rate Limiting](https://abp.io/docs/latest/framework/infrastructure/operation-rate-limiting)
+- [Operation Rate Limiting (Pro)](https://abp.io/docs/latest/modules/operation-rate-limiting)
- [ASP.NET Core Rate Limiting Middleware](https://learn.microsoft.com/en-us/aspnet/core/performance/rate-limit)
- [Exception Handling](https://abp.io/docs/latest/framework/fundamentals/exception-handling)
diff --git a/docs/en/Community-Articles/2026-03-10-Tutorial-Validator/article.md b/docs/en/Community-Articles/2026-03-10-Tutorial-Validator/article.md
index 2d1b60ebd2..43dee7c712 100644
--- a/docs/en/Community-Articles/2026-03-10-Tutorial-Validator/article.md
+++ b/docs/en/Community-Articles/2026-03-10-Tutorial-Validator/article.md
@@ -1,49 +1,56 @@
-# How We Built TutorialValidator to Automatically Validate Documentation Tutorials
+# Automatically Validate Your Documentation: How We Built a Tutorial Validator
-Writing a tutorial is hard. Keeping it correct over time is even harder.
+Writing a tutorial is difficult. Keeping technical documentation accurate over time is even harder.
+If you maintain developer documentation, you probably know the problem: a tutorial that worked a few months ago can silently break after a framework update, dependency change, or a small missing line in a code snippet.
+New developers follow the guide, encounter an error, and quickly lose trust in the documentation.
+To solve this problem, we built the tutorial validator — an open-source, AI-powered tool that automatically verifies whether a software tutorial actually works from start to finish.
+Instead of manually reviewing documentation, the tutorial validator behaves like a real developer following your guide step by step.
+It reads instructions, runs commands, writes files, executes the application, and verifies expected results.
+We initially created it to automatically validate ABP Framework tutorials, then released it as an open-source tool so anyone can use it to test their own documentation.
-If you maintain technical documentation, you probably know this pain: a tutorial that worked three months ago can quietly break after a framework update, a package change, or a small missing line in a code snippet. New developers follow the steps, hit an error, and lose trust in the docs.
-That exact problem is why we built **TutorialValidator**.
+
-TutorialValidator is an open-source, AI-powered tool that checks whether a software tutorial actually works from start to finish. You give it a tutorial URL, and it behaves like a real developer following the guide: it reads each step, executes commands, writes files, runs the app, and verifies expected results.
-We first created it to validate ABP Framework tutorials internally, then shared it publicly so anyone can use it with their own tutorials.
+## The Problem: Broken Tutorials in Technical Documentation
-
+Many documentation issues are difficult to catch during normal reviews.
+Common problems include:
-## What Problem Does It Solve?
+- A command assumes a file already exists
-Most documentation issues are not obvious during review:
+- A code snippet misses a namespace or import
-- A command assumes a file that has not been created yet
-- A code sample misses a namespace or import
-- A step relies on hidden context that is never explained
-- An endpoint is expected to respond, but does not
+- A tutorial step relies on hidden context
-Traditional proofreading catches wording problems. TutorialValidator targets **execution problems**.
+- An endpoint is expected to respond but fails
-It turns tutorials into something testable.
+- A dependency version changed and breaks the project
-## How It Works (Simple View)
-TutorialValidator runs in three phases:
+Traditional proofreading tools only check grammar or wording.
+**The tutorial validator focuses on execution correctness.**
+It treats tutorials like testable workflows, ensuring that every step works exactly as written.
+
+## How the Tutorial Validator Works
+
+The tutorial validator validates tutorials using a three-stage pipeline:
1. **Analyst**: Scrapes tutorial pages and converts instructions into a structured test plan
2. **Executor**: Follows the plan step by step in a clean environment
3. **Reporter**: Produces a clear result summary and optional notifications
-
+
+It identifies commands, code edits, HTTP requests, and expected outcomes.
The key idea is simple: if a developer would need to do it, the validator does it too.
-
That includes running terminal commands, editing files, checking HTTP responses, and validating build outcomes.
-
+
-## Why This Approach Is Useful
+## Why Automated Tutorial Validation Matters
-TutorialValidator is designed for practical documentation quality, not just technical experimentation.
+The tutorial validator is designed for practical documentation quality, not just technical experimentation.
- **Catches real-world breakages early** before readers report them
- **Creates repeatable validation** instead of one-off manual checks
@@ -54,7 +61,7 @@ For example, `junior` and `mid` personas are great for spotting unclear document
## Built for ABP, Open for Everyone
-Even though TutorialValidator was born from ABP documentation needs, it is not limited to ABP content.
+Although TutorialValidator was originally built to validate **ABP Framework tutorials**, it works with **any publicly accessible software tutorial**.
It supports validating any publicly accessible software tutorial and can run in:
@@ -63,28 +70,44 @@ It supports validating any publicly accessible software tutorial and can run in:
It also supports multiple AI providers, including OpenAI, Azure OpenAI, and OpenAI-compatible endpoints.
-## Open Source and Extensible
+## Open Source and Easily Extensible
+
+The tutorial validator is designed with a modular architecture.
+The project consists of multiple focused components:
+
+- **Core** – shared models and contracts
+- **Analyst** – tutorial scraping and step extraction
+- **Executor** – step-by-step execution engine
+- **Orchestrator** – workflow coordination
+- **Reporter** – notifications and result summaries
-TutorialValidator is structured as multiple focused projects:
+This architecture makes it easy to extend the validator with:
-- Core models and shared contracts
-- Analyst for scraping and plan extraction
-- Executor for step-by-step validation
-- Orchestrator for end-to-end workflow
-- Reporter for Email/Discord notifications
+- new step types
+- additional AI providers
+- custom reporting integrations
This architecture keeps the project easy to understand and extend. Teams can add new step types, plugins, or reporting channels based on their own workflow.
## Final Thoughts
-Documentation is part of the product experience. When tutorials fail, trust fails.
+Documentation is a critical part of the product experience.
+When tutorials break, developer trust breaks too.
+TutorialValidator helps teams move from:
+
+> We believe this tutorial works 🙄
+
+to
-TutorialValidator helps teams move from “we think this tutorial works” to “we verified it works.”
+> We verified this tutorial works ✅
+If your team maintains **technical tutorials, developer guides, or framework documentation**, automated tutorial validation can provide a powerful safety net.
+
+Documentation is part of the product experience. When tutorials fail, trust fails.
If your team maintains technical tutorials, this project can give you a practical safety net and a repeatable quality process.
---
-Repository: https://github.com/AbpFramework/TutorialValidator
+You can find the source code of the tutorial validator in this repo 👉 https://github.com/abpframework/tutorial-validator
-If you try it in your own docs pipeline, we would love to hear your feedback and ideas.
+We would love to hear your feedback and ideas, and we welcome PRs to improve this application.
diff --git a/docs/en/Community-Articles/2026-03-12-OpenIddict-private-key-jwt/POST.md b/docs/en/Community-Articles/2026-03-12-OpenIddict-private-key-jwt/POST.md
new file mode 100644
index 0000000000..a12779289c
--- /dev/null
+++ b/docs/en/Community-Articles/2026-03-12-OpenIddict-private-key-jwt/POST.md
@@ -0,0 +1,185 @@
+# Secure Client Authentication with private_key_jwt in ABP 10.3
+
+If you've built a confidential client with ABP's OpenIddict module, you know the drill: create an application in the management UI, set a `client_id`, generate a `client_secret`, and paste that secret into your client's `appsettings.json` or environment variables. It works. It's familiar. And for a lot of projects, it's perfectly fine.
+
+But `client_secret` is a **shared secret** — and shared secrets carry an uncomfortable truth: the same value exists in two places at once. The authorization server stores a hash of it in the database, and your client stores the raw value in configuration. That means two potential leak points. Worse, the secret has no inherent identity. Anyone who obtains the string can impersonate your client and the server has no way to tell the difference.
+
+For many teams, this tradeoff is acceptable. But certain scenarios make it hard to ignore:
+
+- **Microservice-to-microservice calls**: A backend mesh of a dozen services, each with its own `client_secret` scattered across deployment configs and CI/CD pipelines. Rotating them across environments without missing one becomes a coordination problem.
+- **Multi-tenant SaaS platforms**: Every tenant's client application deserves truly isolated credentials. With shared secrets, the database holds hashed copies for all tenants — a breach of that table is a breach of everyone's credentials.
+- **Financial-grade API (FAPI) compliance**: Standards like [FAPI 2.0](https://openid.net/specs/fapi-2_0-security-profile.html) explicitly require asymmetric client authentication. `client_secret` doesn't make the cut.
+- **Zero-trust architectures**: In a zero-trust model, identity must be cryptographically provable, not based on a string that can be copied and pasted.
+
+The underlying problem is that a shared secret is just a password. It can be stolen, replicated, and used without leaving a trace. The fix has existed in cryptography for decades: **asymmetric keys**.
+
+With asymmetric key authentication, the client generates a key pair. The public key is registered with the authorization server. The private key never leaves the client. Each time the client needs a token, it signs a short-lived JWT — called a _client assertion_ — with the private key. The server verifies the signature using the registered public key. There is no secret on the server side that could be used to forge a request, because the private key is never transmitted or stored remotely.
+
+This is exactly what the **`private_key_jwt`** client authentication method, defined in [OpenID Connect Core](https://openid.net/specs/openid-connect-core-1_0.html#ClientAuthentication), provides. ABP's OpenIddict module now supports it end-to-end: you register a **JSON Web Key Set (JWKS)** containing your public key through the application management UI (ABP Commercial), and your client authenticates using the corresponding private key. The key generation tooling (`abp generate-jwks`) ships as part of the open-source ABP CLI.
+
+> This feature is available starting from **ABP Framework 10.3**.
+
+## How It Works
+
+The flow is straightforward:
+
+1. The client holds an RSA key pair — **private key** (kept locally) and **public key** (registered on the authorization server as a JWKS).
+2. On each token request, the client uses the private key to sign a JWT with a short expiry and a unique `jti` claim.
+3. The authorization server verifies the signature against the registered public key and issues a token if it checks out.
+
+The private key never leaves the client. Even if someone obtains the authorization server's database, there's nothing there that can be used to generate a valid client assertion.
+
+## Generating a Key Pair
+
+ABP CLI includes a `generate-jwks` command that creates an RSA key pair in the right formats:
+
+```bash
+abp generate-jwks
+```
+
+This produces two files in the current directory:
+
+- `jwks.json` — the public key in JWKS format, to be uploaded to the server
+- `jwks-private.pem` — the private key in PKCS#8 PEM format, to be kept on the client
+
+You can customize the output directory, key size, and signing algorithm:
+
+```bash
+abp generate-jwks --alg RS512 --key-size 4096 -o ./keys -f myapp
+```
+
+> Supported algorithms: `RS256`, `RS384`, `RS512`, `PS256`, `PS384`, `PS512`. The default is `RS256` with a 2048-bit key.
+
+The command also prints the contents of `jwks.json` to the console so you can copy it directly.
+
+## Registering the JWKS in the Management UI
+
+Open **OpenIddict → Applications** in the ABP admin panel and create or edit a confidential application (Client Type: `Confidential`).
+
+In the **Client authentication method** section, you'll find the new **JSON Web Key Set** field.
+
+
+
+Paste the contents of `jwks.json` into the **JSON Web Key Set** field:
+
+```json
+{
+ "keys": [
+ {
+ "kty": "RSA",
+ "use": "sig",
+ "kid": "6444...",
+ "alg": "RS256",
+ "n": "tx...",
+ "e": "AQAB"
+ }
+ ]
+}
+```
+
+Save the application. It's now configured for `private_key_jwt` authentication. You can set either `client_secret` or a JWKS, or both — ABP enforces that a confidential application always has at least one credential.
+
+## Requesting a Token with the Private Key
+
+On the client side, each token request requires building a _client assertion_ JWT signed with the private key. Here's a complete `client_credentials` example:
+
+```csharp
+// Discover the authorization server endpoints (including the issuer URI).
+var client = new HttpClient();
+var configuration = await client.GetDiscoveryDocumentAsync("https://your-auth-server/");
+
+// Load the private key generated by `abp generate-jwks`.
+using var rsaKey = RSA.Create();
+rsaKey.ImportFromPem(await File.ReadAllTextAsync("jwks-private.pem"));
+
+// Read the kid from jwks.json so it stays in sync with the server-registered public key.
+string? signingKid = null;
+if (File.Exists("jwks.json"))
+{
+ using var jwksDoc = JsonDocument.Parse(await File.ReadAllTextAsync("jwks.json"));
+ if (jwksDoc.RootElement.TryGetProperty("keys", out var keysElem) &&
+ keysElem.GetArrayLength() > 0 &&
+ keysElem[0].TryGetProperty("kid", out var kidElem))
+ {
+ signingKid = kidElem.GetString();
+ }
+}
+
+var signingKey = new RsaSecurityKey(rsaKey) { KeyId = signingKid };
+var signingCredentials = new SigningCredentials(signingKey, SecurityAlgorithms.RsaSha256);
+
+// Build the client assertion JWT.
+var now = DateTime.UtcNow;
+var jwtHandler = new JsonWebTokenHandler();
+var clientAssertionToken = jwtHandler.CreateToken(new SecurityTokenDescriptor
+{
+ // OpenIddict requires typ = "client-authentication+jwt" for client assertion JWTs.
+ TokenType = "client-authentication+jwt",
+ Issuer = "MyClientId",
+ // aud must equal the authorization server's issuer URI from the discovery document,
+ // not the token endpoint URL.
+ Audience = configuration.Issuer,
+ Subject = new ClaimsIdentity(new[]
+ {
+ new Claim(JwtRegisteredClaimNames.Sub, "MyClientId"),
+ new Claim(JwtRegisteredClaimNames.Jti, Guid.NewGuid().ToString()),
+ }),
+ IssuedAt = now,
+ NotBefore = now,
+ Expires = now.AddMinutes(5),
+ SigningCredentials = signingCredentials,
+});
+
+// Request a token using the client_credentials flow.
+var tokenResponse = await client.RequestClientCredentialsTokenAsync(
+ new ClientCredentialsTokenRequest
+ {
+ Address = configuration.TokenEndpoint,
+ ClientId = "MyClientId",
+ ClientCredentialStyle = ClientCredentialStyle.PostBody,
+ ClientAssertion = new ClientAssertion
+ {
+ Type = OidcConstants.ClientAssertionTypes.JwtBearer,
+ Value = clientAssertionToken,
+ },
+ Scope = "MyAPI",
+ });
+```
+
+A few things worth paying attention to:
+
+- **`TokenType`** must be `"client-authentication+jwt"`. OpenIddict rejects client assertion JWTs that don't carry this header.
+- **`Audience`** must match the authorization server's issuer URI exactly — use `configuration.Issuer` from the discovery document, not the token endpoint URL.
+- **`Jti`** must be unique per request to prevent replay attacks.
+- Keep **`Expires`** short (five minutes or less). A client assertion is a one-time proof of identity, not a long-lived credential.
+
+This example uses [IdentityModel](https://github.com/IdentityModel/IdentityModel) for the token request helpers and [Microsoft.IdentityModel.JsonWebTokens](https://www.nuget.org/packages/Microsoft.IdentityModel.JsonWebTokens) for JWT creation.
+
+## Key Rotation Without Downtime
+
+One of the practical advantages of JWKS is that it can hold multiple public keys simultaneously. This makes **zero-downtime key rotation** straightforward:
+
+1. Run `abp generate-jwks` to produce a new key pair.
+2. Append the new public key to the `keys` array in your existing `jwks.json` and update the JWKS in the management UI.
+3. Switch the client to sign assertions with the new private key.
+4. Once the transition is complete, remove the old public key from the JWKS.
+
+During the transition window, both the old and new public keys are registered on the server, so any in-flight requests signed with either key will still validate correctly.
+
+## Summary
+
+To use `private_key_jwt` authentication in an ABP Pro application:
+
+1. Run `abp generate-jwks` to generate an RSA key pair.
+2. Paste the `jwks.json` contents into the **JSON Web Key Set** field in the OpenIddict application management UI.
+3. On the client side, sign a short-lived _client assertion_ JWT with the private key — making sure to set the correct `typ`, `aud` (from the discovery document), and a unique `jti` — then use it to request a token.
+
+ABP handles public key storage and validation automatically. OpenIddict handles the signature verification on the token endpoint. As a developer, you only need to keep the private key file secure — there's no shared secret to synchronize between client and server.
+
+## References
+
+- [OpenID Connect Core — Client Authentication](https://openid.net/specs/openid-connect-core-1_0.html#ClientAuthentication)
+- [RFC 7523 — JWT Profile for Client Authentication](https://datatracker.ietf.org/doc/html/rfc7523)
+- [ABP OpenIddict Module Documentation](https://abp.io/docs/latest/modules/openiddict)
+- [ABP CLI Documentation](https://abp.io/docs/latest/cli)
+- [OpenIddict Documentation](https://documentation.openiddict.com/)
diff --git a/docs/en/Community-Articles/2026-03-12-OpenIddict-private-key-jwt/cover.png b/docs/en/Community-Articles/2026-03-12-OpenIddict-private-key-jwt/cover.png
new file mode 100644
index 0000000000..e268703fb6
Binary files /dev/null and b/docs/en/Community-Articles/2026-03-12-OpenIddict-private-key-jwt/cover.png differ
diff --git a/docs/en/Community-Articles/2026-03-12-OpenIddict-private-key-jwt/create-edit-ui.png b/docs/en/Community-Articles/2026-03-12-OpenIddict-private-key-jwt/create-edit-ui.png
new file mode 100644
index 0000000000..1ca04b12bd
Binary files /dev/null and b/docs/en/Community-Articles/2026-03-12-OpenIddict-private-key-jwt/create-edit-ui.png differ
diff --git a/docs/en/Community-Articles/2026-03-17-OpenAI-Compatible-Endpoints/POST.md b/docs/en/Community-Articles/2026-03-17-OpenAI-Compatible-Endpoints/POST.md
new file mode 100644
index 0000000000..6bf5ed5774
--- /dev/null
+++ b/docs/en/Community-Articles/2026-03-17-OpenAI-Compatible-Endpoints/POST.md
@@ -0,0 +1,151 @@
+# One Endpoint, Many AI Clients: Turning ABP Workspaces into OpenAI-Compatible Models
+
+ABP's AI Management module already makes it easy to define and manage AI workspaces (provider, model, API key/base URL, system prompt, permissions, MCP tools, RAG settings, and more). With **ABP v10.2**, there is a major addition: you can now expose those workspaces through **OpenAI-compatible endpoints** under `/v1`.
+
+That changes the integration story in a practical way. Instead of wiring every external tool directly to a provider, you can point those tools to ABP and keep runtime decisions centralized in one place.
+
+In this post, we will walk through a practical setup with **AnythingLLM** and show why this pattern is useful in real projects.
+
+Before we get into the details, here's a quick look at the full flow in action:
+
+## See It in Action: AnythingLLM + ABP
+
+The demo below shows the full flow: connecting an OpenAI-compatible client to ABP, selecting a workspace-backed model, and sending a successful chat request through `/v1`.
+
+![AnythingLLM connected to ABP through the OpenAI-compatible endpoints](openai-compatible-endpoints-demo.gif)
+
+## Why This Is a Big Deal
+
+Many teams end up with AI configuration spread across multiple clients and services. Updating providers, rotating keys, or changing model behavior can become operationally messy.
+
+With ABP in front of your AI traffic:
+
+- Clients keep speaking the familiar OpenAI contract.
+- ABP resolves the requested `model` to a workspace.
+- The workspace decides which provider/model settings are actually used.
+
+This gives you a clean split: standardized client integration outside, governed AI configuration inside.
+
+## Key Concept: Workspace = Model
+
+OpenAI-compatible clients send a `model` value.
+In ABP AI Management, that `model` maps to a **workspace name**.
+
+**For example:**
+
+- Workspace name: `SupportAgent`
+- Client request model: `SupportAgent`
+
+When the client calls `/v1/chat/completions` with `"model": "SupportAgent"`, ABP routes the request to that workspace and applies that workspace's provider (OpenAI, Ollama etc.) and model configuration.
+
+This is the main mental model to keep in mind while integrating any OpenAI-compatible tool with ABP.
+
+## Endpoints Exposed by ABP v10.2
+
+The AI Management module exposes OpenAI-compatible REST endpoints at `/v1`.
+
+| Endpoint | Method | Description |
+| ---------------------------- | ------ | ---------------------------------------------- |
+| `/v1/chat/completions` | POST | Chat completions (streaming and non-streaming) |
+| `/v1/completions` | POST | Legacy text completions |
+| `/v1/models` | GET | List available models (workspaces) |
+| `/v1/models/{modelId}` | GET | Get a single model (workspace) |
+| `/v1/embeddings` | POST | Generate embeddings |
+| `/v1/files` | GET | List files |
+| `/v1/files` | POST | Upload a file |
+| `/v1/files/{fileId}` | GET | Get file metadata |
+| `/v1/files/{fileId}` | DELETE | Delete a file |
+| `/v1/files/{fileId}/content` | GET | Download file content |
+
+All endpoints require `Authorization: Bearer <access_token>`.
+
+## Quick Setup with AnythingLLM
+
+Before configuration, ensure:
+
+1. AI Management is installed and running in your ABP app.
+2. At least one workspace is created and **active**.
+3. You have a valid Bearer token for your ABP application.
+
+### 1) Get an access token
+
+Use any valid token accepted by your app. In a demo-style setup, token retrieval can look like this:
+
+```bash
+curl -X POST http://localhost:44337/connect/token \
+ -d "grant_type=password&username=admin&password=1q2w3E*&client_id=DemoApp_API&client_secret=1q2w3e*&scope=DemoApp"
+```
+
+Use the returned `access_token` as the API key value in your OpenAI-compatible client.
+
+### 2) Configure AnythingLLM as Generic OpenAI
+
+In **AnythingLLM -> Settings -> LLM Preference**, select **Generic OpenAI** and set:
+
+| Setting | Value |
+| -------------------- | --------------------------- |
+| Base URL | `http://localhost:44337/v1` |
+| API Key              | `<access_token>`            |
+| Chat Model Selection | Select an active workspace |
+
+In most OpenAI-compatible UIs, the app adds `Bearer` automatically, so the API key field should contain only the raw token string.
+
+### 3) Optional: configure embeddings
+
+If you want RAG flows through ABP, go to **Settings -> Embedding Preference** and use the same Base URL/API key values.
+Then select a workspace that has embedder settings configured.
+
+## Validate the Flow
+
+### List models (workspaces)
+
+```bash
+curl http://localhost:44337/v1/models \
+  -H "Authorization: Bearer <access_token>"
+```
+
+### Chat completion
+
+```bash
+curl -X POST http://localhost:44337/v1/chat/completions \
+  -H "Authorization: Bearer <access_token>" \
+ -H "Content-Type: application/json" \
+ -d '{
+ "model": "MyWorkspace",
+ "messages": [
+ { "role": "user", "content": "Hello from ABP OpenAI-compatible endpoint!" }
+ ]
+ }'
+```
+
+### Optional SDK check (Python)
+
+```python
+from openai import OpenAI
+
+client = OpenAI(
+ base_url="http://localhost:44337/v1",
+    api_key="<access_token>"
+)
+
+response = client.chat.completions.create(
+ model="MyWorkspace",
+ messages=[{"role": "user", "content": "Hello!"}]
+)
+
+print(response.choices[0].message.content)
+```
+
+## Where This Fits in Real Projects
+
+This approach is a strong fit when you want to:
+
+- Keep ABP as the central control plane for AI workspaces.
+- Let client tools integrate through a standard OpenAI contract.
+- Switch providers or model settings without rewriting client-side integration.
+
+If your team uses multiple AI clients, this pattern keeps integration simple while preserving control where it matters.
+
+## Learn More
+
+- [ABP AI Management Documentation](https://abp.io/docs/10.2/modules/ai-management)
diff --git a/docs/en/Community-Articles/2026-03-17-OpenAI-Compatible-Endpoints/cover-image.png b/docs/en/Community-Articles/2026-03-17-OpenAI-Compatible-Endpoints/cover-image.png
new file mode 100644
index 0000000000..3024f341b4
Binary files /dev/null and b/docs/en/Community-Articles/2026-03-17-OpenAI-Compatible-Endpoints/cover-image.png differ
diff --git a/docs/en/Community-Articles/2026-03-17-OpenAI-Compatible-Endpoints/openai-compatible-endpoints-demo.gif b/docs/en/Community-Articles/2026-03-17-OpenAI-Compatible-Endpoints/openai-compatible-endpoints-demo.gif
new file mode 100644
index 0000000000..e1c830087b
Binary files /dev/null and b/docs/en/Community-Articles/2026-03-17-OpenAI-Compatible-Endpoints/openai-compatible-endpoints-demo.gif differ
diff --git a/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/POST.md b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/POST.md
new file mode 100644
index 0000000000..dcb69c289d
--- /dev/null
+++ b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/POST.md
@@ -0,0 +1,167 @@
+# Shared User Accounts in ABP Multi-Tenancy
+
+Multi-tenancy is built on **isolation** — isolated data, isolated permissions, isolated users. ABP's default behavior has always followed this assumption: one user belongs to exactly one tenant. Clean, simple, no ambiguity. For most SaaS applications, that's exactly what you want. (The new `TenantUserSharingStrategy` enum formally names this default behavior `Isolated`.)
+
+But isolation is **the system's** concern, not **the user's**. In practice, people's work doesn't always line up neatly with tenant boundaries.
+
+Think about a financial consultant who works with three different companies — each one a tenant in your system. Under the Isolated model, she needs three separate accounts, three passwords. Forgot which password goes with which company? Good luck. Worse, the system sees three unrelated people — there's nothing linking those accounts to the same human being.
+
+This comes up more often than you'd think:
+
+- In a **corporate group**, an IT admin manages multiple subsidiaries, each running as its own tenant. Every day means logging out, logging back in with different credentials, over and over
+- A **SaaS platform's ops team** needs to hop into different customer tenants to debug issues. Each time they create a throwaway account, then delete it — or just share one account and lose all audit trail
+- Some users resort to email aliases (`alice+company1@example.com`) to work around uniqueness constraints — that's not a solution, that's a hack
+
+The common thread here: the user's **identity** is global, but their **working context** is per-tenant. The problem isn't a technical limitation — it's that the Isolated assumption ("one user, one tenant") simply doesn't hold in these scenarios.
+
+What's needed is not "one account per tenant" but "one account, multiple tenants."
+
+ABP's **Shared User Accounts** (`TenantUserSharingStrategy.Shared`) does exactly this. It makes user identity global and turns tenants into workspaces that a user can join and switch between — similar to how one person can belong to multiple workspaces in Slack.
+
+> This is a **commercial** feature, available starting from **ABP 10.2**, provided by the Account.Pro and Identity.Pro modules.
+
+## Enabling the Shared Strategy
+
+A single configuration is all it takes:
+
+```csharp
+Configure(options =>
+{
+ options.IsEnabled = true;
+ options.UserSharingStrategy = TenantUserSharingStrategy.Shared;
+});
+```
+
+The most important behavior change after switching to Shared: **username and email uniqueness become global** instead of per-tenant. This follows naturally — if the same account needs to be recognized across tenants, its identifiers must be unique across the entire system.
+
+Security-related settings (2FA, account lockout, password policies, captcha, etc.) are also managed at the **Host** level. This makes sense too: if user identity is global, the security rules around it should be global as well.
+
+## One Account, Multiple Tenants
+
+With the Shared strategy enabled, the day-to-day user experience changes fundamentally.
+
+When a user is associated with only one tenant, the system recognizes it automatically and signs them in directly — the user doesn't even notice that tenants exist. When the user belongs to multiple tenants, the login flow presents a tenant selection screen after credentials are verified:
+
+![Tenant selection screen at login](tenant-selection.png)
+
+After signing into a tenant, a tenant switcher appears in the user menu — click it anytime to jump to another tenant without signing out. ABP re-issues the authentication ticket (with the new `TenantId` in the claims) on each switch, so the permission system is fully independent per tenant.
+
+![Tenant switcher in the user menu](switch-tenant.png)
+
+Users can also leave a tenant. Leaving doesn't delete the association record — it marks it as inactive. This preserves foreign key relationships with other entities. If the user is invited back later, the association is simply reactivated instead of recreated.
+
+Back to our earlier scenario: the financial consultant now has one account, one password. She picks which company to work in at login, switches between them during the day. The system knows it's the same person, and the audit log can trace her actions across every tenant.
+
+## Invitations
+
+Users don't just appear in a tenant — someone has to invite them. This is the core operation from the administrator's perspective.
+
+A tenant admin opens the invitation dialog, enters one or more email addresses (batch invitations are supported), and can pre-assign roles — so the user gets the right permissions the moment they join, no extra setup needed:
+
+![Invite users dialog with role pre-assignment](invite-user.png)
+
+The invited person receives an email with a link. What happens next depends on whether they already have an account.
+
+If they **already have an account**, they see a confirmation page and can join the tenant with a single click:
+
+![Existing user confirms joining the tenant](exist-user-accept.png)
+
+If they **don't have an account yet**, the link takes them to a registration form. Once they register, they're automatically added to the tenant:
+
+![New user registers and joins the tenant](new-user-accept.png)
+
+Admins can also manage pending invitations at any time — resend emails or revoke invitations.
+
+> The invitation feature is also available under the Isolated strategy, but invited users can only join a single tenant.
+
+## Setting Up a New Tenant
+
+There's a notable shift in how new tenants are bootstrapped.
+
+Under the Isolated model, creating a tenant typically seeds an `admin` user automatically. With Shared, this no longer happens — because users are global, and it doesn't make sense to create one out of thin air for a specific tenant.
+
+Instead, you create the tenant first, then invite someone in and grant them the admin role.
+
+![Invite an admin user to join the new tenant](invite-admin-user-to-join-tenant.png)
+
+![Invitation dialog with the admin role assigned](invite-admin-user-to-join-tenant-modal.png)
+
+This is a natural fit — the admin is just a global user who happens to hold the admin role in this particular tenant.
+
+## Where Do Newly Registered Users Go?
+
+Under the Shared strategy, self-registration runs into an interesting problem: the system doesn't know which tenant the user wants to join. Without being signed in, tenant context is usually determined by subdomain or a tenant switcher on the login page — but for a brand-new user, those signals might not exist at all.
+
+So ABP's approach is: **don't establish any tenant association at registration time**. A newly registered user doesn't belong to any tenant, and doesn't belong to the Host either — this is an entirely new state. ABP still lets these users sign in, change their password, and manage their account, but they can't access any permission-protected features within a tenant.
+
+`AbpIdentityPendingTenantUserOptions.Strategy` controls what happens in this "pending" state.
+
+**CreateTenant** — automatically creates a tenant for the new user. This fits the "sign up and get your own workspace" pattern, like how Slack or Notion handles registration: you register, the system spins up a workspace for you.
+
+```csharp
+Configure<AbpIdentityPendingTenantUserOptions>(options =>
+{
+ options.Strategy = AbpIdentityPendingTenantUserStrategy.CreateTenant;
+});
+```
+
+![A tenant is created automatically for the newly registered user](new-user-join-strategy-create-tenant.png)
+
+**Inform** (the default) — shows a message telling the user to contact an administrator to join a tenant. This is the right choice for invite-only platforms where users must be brought in by an existing tenant admin.
+
+```csharp
+Configure<AbpIdentityPendingTenantUserOptions>(options =>
+{
+ options.Strategy = AbpIdentityPendingTenantUserStrategy.Inform;
+});
+```
+
+![The user is informed to contact an administrator](new-user-join-strategy-inform.png)
+
+There's also a **Redirect** strategy that sends the user to a custom URL for more complex flows.
+
+> See the [official documentation](https://abp.io/docs/latest/modules/account/shared-user-accounts) for full configuration details.
+
+## Database Considerations
+
+The Shared strategy introduces some mechanisms and constraints at the database level that are worth understanding.
+
+### Global Uniqueness: Enforced in Code, Not by Database Indexes
+
+Username and email uniqueness checks must span all tenants. ABP disables the tenant filter (`TenantFilter.Disable()`) during validation and searches globally for conflicts.
+
+A notable design choice here: **global uniqueness is enforced at the application level, not through database unique indexes**. The reason is practical — in a database-per-tenant setup, users live in separate physical databases, so a cross-database unique index simply isn't possible. Even in a shared database, soft-delete complicates unique indexes (you'd need a composite index on "username + deletion time"). So ABP handles this in application code instead.
+
+To keep things safe under concurrency — say two tenant admins invite the same email address at the same time — ABP uses a **distributed lock** to serialize uniqueness validation. This means your production environment needs a distributed lock provider configured (such as Redis).
+
+The uniqueness check goes beyond just "no duplicate usernames." ABP also checks for **cross-field conflicts**: a user's username can't match another user's email, and vice versa. This prevents identity confusion in edge cases.
+
+### Tenants with Separate Databases
+
+If some of your tenants use their own database (database-per-tenant), the Shared strategy requires extra attention.
+
+The login flow and tenant selection happen on the **Host side**. This means the Host database's `AbpUsers` table must contain records for all users — even those originally created in a tenant's separate database. ABP's approach is replication: it saves the primary user record in the Host context and creates a copy in the tenant context. In a shared-database setup, both records live in the same table; in a database-per-tenant setup, they live in different physical databases. Updates and deletes are kept in sync automatically.
+
+If your application uses social login or passkeys, the `AbpUserLogins` and `AbpUserPasskeys` tables also need to be synced in the Host database.
+
+### Migrating from the Isolated Strategy
+
+If you're moving an existing multi-tenant application from Isolated to Shared, ABP automatically runs a global uniqueness check when you switch the strategy and reports any conflicts.
+
+The most common conflict: the same email address registered as separate users in different tenants. You'll need to resolve these first — merge the accounts or change one side's email — before the Shared strategy can be enabled.
+
+## Summary
+
+ABP's Shared User Accounts addresses a real-world need in multi-tenant systems: one person working across multiple tenants.
+
+- One configuration switch to `TenantUserSharingStrategy.Shared`
+- User experience: pick a tenant at login, switch between tenants anytime, one password for everything
+- Admin experience: invite users by email, pre-assign roles on invitation
+- Database notes: configure a distributed lock provider for production; tenants with separate databases need user records replicated in the Host database
+
+ABP takes care of global uniqueness validation, tenant association management, and login flow adaptation under the hood.
+
+## References
+
+- [Shared User Accounts](https://abp.io/docs/latest/modules/account/shared-user-accounts)
+- [ABP Multi-Tenancy](https://abp.io/docs/latest/framework/architecture/multi-tenancy)
diff --git a/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/cover.png b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/cover.png
new file mode 100644
index 0000000000..33cbea2f52
Binary files /dev/null and b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/cover.png differ
diff --git a/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/exist-user-accept.png b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/exist-user-accept.png
new file mode 100644
index 0000000000..23f35c0904
Binary files /dev/null and b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/exist-user-accept.png differ
diff --git a/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/invite-admin-user-to-join-tenant-modal.png b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/invite-admin-user-to-join-tenant-modal.png
new file mode 100644
index 0000000000..8fa9d2fee9
Binary files /dev/null and b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/invite-admin-user-to-join-tenant-modal.png differ
diff --git a/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/invite-admin-user-to-join-tenant.png b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/invite-admin-user-to-join-tenant.png
new file mode 100644
index 0000000000..edfb5bedb0
Binary files /dev/null and b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/invite-admin-user-to-join-tenant.png differ
diff --git a/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/invite-user.png b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/invite-user.png
new file mode 100644
index 0000000000..67a3f04073
Binary files /dev/null and b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/invite-user.png differ
diff --git a/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/new-user-accept.png b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/new-user-accept.png
new file mode 100644
index 0000000000..ffc887f1ed
Binary files /dev/null and b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/new-user-accept.png differ
diff --git a/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/new-user-join-strategy-create-tenant.png b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/new-user-join-strategy-create-tenant.png
new file mode 100644
index 0000000000..7d4a64c7c0
Binary files /dev/null and b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/new-user-join-strategy-create-tenant.png differ
diff --git a/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/new-user-join-strategy-inform.png b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/new-user-join-strategy-inform.png
new file mode 100644
index 0000000000..a6a62e1c96
Binary files /dev/null and b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/new-user-join-strategy-inform.png differ
diff --git a/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/switch-tenant.png b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/switch-tenant.png
new file mode 100644
index 0000000000..6f19de1da7
Binary files /dev/null and b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/switch-tenant.png differ
diff --git a/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/tenant-selection.png b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/tenant-selection.png
new file mode 100644
index 0000000000..e40bf6aaeb
Binary files /dev/null and b/docs/en/Community-Articles/2026-03-17-Shared-User-Accounts-in-ABP/tenant-selection.png differ
diff --git a/docs/en/cli/index.md b/docs/en/cli/index.md
index e376887baf..bdbe9493b9 100644
--- a/docs/en/cli/index.md
+++ b/docs/en/cli/index.md
@@ -75,6 +75,7 @@ Here is the list of all available commands before explaining their details:
* **[`install-old-cli`](../cli#install-old-cli)**: Installs old ABP CLI.
* **[`mcp-studio`](../cli#mcp-studio)**: Starts ABP Studio MCP bridge for AI tools (requires ABP Studio running).
* **[`generate-razor-page`](../cli#generate-razor-page)**: Generates a page class that you can use it in the ASP NET Core pipeline to return an HTML page.
+* **[`generate-jwks`](../cli#generate-jwks)**: Generates an RSA key pair (JWKS public key + PEM private key) for OpenIddict `private_key_jwt` client authentication.
### help
@@ -1127,6 +1128,99 @@ app.Use(async (httpContext, next) =>
* ```--version``` or ```-v```: Specifies the version for ABP CLI to be installed.
+### generate-jwks
+
+Generates an RSA key pair for use with OpenIddict `private_key_jwt` client authentication.
+
+The command produces two files:
+
+| File | Description |
+|---|---|
+| `<file>.json` | JWKS (JSON Web Key Set) containing the **public key**. Paste this into the **JSON Web Key Set** field of your OpenIddict application in the ABP management UI. |
+| `<file>-private.pem` | PKCS#8 PEM **private key**. Store this securely in your client application and use it to sign JWT client assertions. |
+
+> **Security notice:** Never commit the private key file to source control. Add it to `.gitignore`. Only the JWKS (public key) needs to be shared with the authorization server.
+
+Usage:
+
+```bash
+abp generate-jwks [options]
+```
+
+#### Options
+
+* `--output` or `-o`: Output directory. Defaults to the current directory.
+* `--key-size` or `-s`: RSA key size in bits. Supported values: `2048` (default), `4096`.
+* `--alg`: Signing algorithm. Supported values: `RS256` (default), `RS384`, `RS512`, `PS256`, `PS384`, `PS512`.
+* `--kid`: Custom Key ID. Auto-generated if not specified.
+* `--file` or `-f`: Output file name prefix. Defaults to `jwks`. Generates `<file>.json` and `<file>-private.pem`.
+
+#### Examples
+
+```bash
+# Generate with defaults (2048-bit RS256, current directory)
+abp generate-jwks
+
+# Generate with RS512 and 4096-bit key
+abp generate-jwks --alg RS512 --key-size 4096
+
+# Output to a specific directory with a custom file prefix
+abp generate-jwks -o ./keys -f myapp
+```
+
+#### Workflow
+
+1. Run `abp generate-jwks` to generate the key pair.
+
+2. Open the ABP OpenIddict application management UI, select your **Confidential** application, choose **JWKS (private_key_jwt)** as the authentication method, and paste the contents of `jwks.json` into the **JSON Web Key Set** field.
+
+3. In your client application, load the private key from the PEM file and sign JWT client assertions:
+
+```csharp
+// Load private key from PEM file
+using var rsa = RSA.Create();
+rsa.ImportFromPem(await File.ReadAllTextAsync("jwks-private.pem"));
+
+// The kid must match the "kid" field in the JWKS registered on the server
+var signingKey = new RsaSecurityKey(rsa) { KeyId = "<kid>" };
+var signingCredentials = new SigningCredentials(signingKey, SecurityAlgorithms.RsaSha256);
+
+var now = DateTime.UtcNow;
+var jwtHandler = new JsonWebTokenHandler();
+var clientAssertion = jwtHandler.CreateToken(new SecurityTokenDescriptor
+{
+ // OpenIddict requires typ = "client-authentication+jwt"
+ TokenType = "client-authentication+jwt",
+ // iss and sub must both equal the client_id
+  Issuer = "<client_id>",
+  Audience = "<audience-from-discovery-document>",
+ Subject = new ClaimsIdentity(new[]
+ {
+    new Claim(JwtRegisteredClaimNames.Sub, "<client_id>"),
+ new Claim(JwtRegisteredClaimNames.Jti, Guid.NewGuid().ToString()),
+ }),
+ IssuedAt = now,
+ NotBefore = now,
+ Expires = now.AddMinutes(5),
+ SigningCredentials = signingCredentials,
+});
+
+// Use the assertion in the token request
+var tokenResponse = await httpClient.RequestClientCredentialsTokenAsync(
+ new ClientCredentialsTokenRequest
+ {
+    Address = "<token_endpoint>",
+    ClientId = "<client_id>",
+ ClientCredentialStyle = ClientCredentialStyle.PostBody,
+ ClientAssertion = new ClientAssertion
+ {
+ Type = OidcConstants.ClientAssertionTypes.JwtBearer,
+ Value = clientAssertion,
+ },
+    Scope = "<scope>",
+ });
+```
+
## See Also
* [Examples for the new command](./new-command-samples.md)
diff --git a/docs/en/docs-nav.json b/docs/en/docs-nav.json
index 0cbdb312ac..b0a76124b4 100644
--- a/docs/en/docs-nav.json
+++ b/docs/en/docs-nav.json
@@ -807,10 +807,6 @@
"text": "Object to Object Mapping",
"path": "framework/infrastructure/object-to-object-mapping.md"
},
- {
- "text": "Operation Rate Limiting",
- "path": "framework/infrastructure/operation-rate-limiting.md"
- },
{
"text": "Settings",
"path": "framework/infrastructure/settings.md"
@@ -2577,6 +2573,10 @@
"text": "Language Management (Pro)",
"path": "modules/language-management.md"
},
+ {
+ "text": "Operation Rate Limiting (Pro)",
+ "path": "modules/operation-rate-limiting.md"
+ },
{
"text": "OpenIddict",
"isLazyExpandable": true,
diff --git a/docs/en/framework/api-development/auto-controllers.md b/docs/en/framework/api-development/auto-controllers.md
index b40718b079..2c6f0ee39a 100644
--- a/docs/en/framework/api-development/auto-controllers.md
+++ b/docs/en/framework/api-development/auto-controllers.md
@@ -70,7 +70,7 @@ Route is calculated based on some conventions:
* Continues with a **route path**. Default value is '**/app**' and can be configured as like below:
````csharp
-Configure(options =>
+PreConfigure<AbpAspNetCoreMvcOptions>(options =>
{
options.ConventionalControllers
.Create(typeof(BookStoreApplicationModule).Assembly, opts =>
@@ -149,7 +149,7 @@ public class PersonAppService : ApplicationService
You can further filter classes to become an API controller by providing the `TypePredicate` option:
````csharp
-services.Configure(options =>
+PreConfigure<AbpAspNetCoreMvcOptions>(options =>
{
options.ConventionalControllers
.Create(typeof(BookStoreApplicationModule).Assembly, opts =>
diff --git a/docs/en/framework/infrastructure/background-jobs/tickerq.md b/docs/en/framework/infrastructure/background-jobs/tickerq.md
index de7b3631f2..81dd021607 100644
--- a/docs/en/framework/infrastructure/background-jobs/tickerq.md
+++ b/docs/en/framework/infrastructure/background-jobs/tickerq.md
@@ -95,13 +95,13 @@ public class CleanupJobs
public override Task OnPreApplicationInitializationAsync(ApplicationInitializationContext context)
{
var abpTickerQFunctionProvider = context.ServiceProvider.GetRequiredService();
- abpTickerQFunctionProvider.Functions.TryAdd(nameof(CleanupJobs), (string.Empty, TickerTaskPriority.Normal, new TickerFunctionDelegate(async (cancellationToken, serviceProvider, tickerFunctionContext) =>
+ abpTickerQFunctionProvider.AddFunction(nameof(CleanupJobs), async (cancellationToken, serviceProvider, tickerFunctionContext) =>
{
var service = new CleanupJobs(); // Or get it from the serviceProvider
var request = await TickerRequestProvider.GetRequestAsync(tickerFunctionContext, cancellationToken);
var genericContext = new TickerFunctionContext(tickerFunctionContext, request);
await service.CleanupLogsAsync(genericContext, cancellationToken);
- })));
+ }, TickerTaskPriority.Normal);
abpTickerQFunctionProvider.RequestTypes.TryAdd(nameof(CleanupJobs), (typeof(string).FullName, typeof(string)));
return Task.CompletedTask;
}
diff --git a/docs/en/framework/infrastructure/background-workers/tickerq.md b/docs/en/framework/infrastructure/background-workers/tickerq.md
index d4ddde3cd9..4547b85b85 100644
--- a/docs/en/framework/infrastructure/background-workers/tickerq.md
+++ b/docs/en/framework/infrastructure/background-workers/tickerq.md
@@ -83,13 +83,13 @@ public class CleanupJobs
public override Task OnPreApplicationInitializationAsync(ApplicationInitializationContext context)
{
var abpTickerQFunctionProvider = context.ServiceProvider.GetRequiredService();
- abpTickerQFunctionProvider.Functions.TryAdd(nameof(CleanupJobs), (string.Empty, TickerTaskPriority.Normal, new TickerFunctionDelegate(async (cancellationToken, serviceProvider, tickerFunctionContext) =>
+ abpTickerQFunctionProvider.AddFunction(nameof(CleanupJobs), async (cancellationToken, serviceProvider, tickerFunctionContext) =>
{
var service = new CleanupJobs(); // Or get it from the serviceProvider
var request = await TickerRequestProvider.GetRequestAsync(tickerFunctionContext, cancellationToken);
var genericContext = new TickerFunctionContext(tickerFunctionContext, request);
await service.CleanupLogsAsync(genericContext, cancellationToken);
- })));
+ }, TickerTaskPriority.Normal);
abpTickerQFunctionProvider.RequestTypes.TryAdd(nameof(CleanupJobs), (typeof(string).FullName, typeof(string)));
return Task.CompletedTask;
}
@@ -112,11 +112,11 @@ await cronTickerManager.AddAsync(new CronTickerEntity
You can specify a cron expression instead of using `ICronTickerManager` to add a worker:
```csharp
-abpTickerQFunctionProvider.Functions.TryAdd(nameof(CleanupJobs), (string.Empty, TickerTaskPriority.Normal, new TickerFunctionDelegate(async (cancellationToken, serviceProvider, tickerFunctionContext) =>
+abpTickerQFunctionProvider.AddFunction(nameof(CleanupJobs), async (cancellationToken, serviceProvider, tickerFunctionContext) =>
{
var service = new CleanupJobs();
var request = await TickerRequestProvider.GetRequestAsync(tickerFunctionContext, cancellationToken);
var genericContext = new TickerFunctionContext(tickerFunctionContext, request);
await service.CleanupLogsAsync(genericContext, cancellationToken);
-})));
+}, TickerTaskPriority.Normal);
```
diff --git a/docs/en/framework/infrastructure/entity-cache.md b/docs/en/framework/infrastructure/entity-cache.md
index 50f1bc1b3c..aed1f33c6e 100644
--- a/docs/en/framework/infrastructure/entity-cache.md
+++ b/docs/en/framework/infrastructure/entity-cache.md
@@ -26,7 +26,7 @@ public class Product : AggregateRoot
public string Name { get; set; }
public string Description { get; set; }
- public float Price { get; set; }
+ public decimal Price { get; set; }
public int StockCount { get; set; }
}
```
@@ -72,7 +72,7 @@ public class ProductDto : EntityDto
{
public string Name { get; set; }
public string Description { get; set; }
- public float Price { get; set; }
+ public decimal Price { get; set; }
public int StockCount { get; set; }
}
```
@@ -147,6 +147,115 @@ context.Services.AddEntityCache(
* Entity classes should be serializable/deserializable to/from JSON to be cached (because it's serialized to JSON when saving in the [Distributed Cache](../fundamentals/caching.md)). If your entity class is not serializable, you can consider using a cache-item/DTO class instead, as explained before.
* Entity Caching System is designed as **read-only**. You should use the standard [repository](../architecture/domain-driven-design/repositories.md) methods to manipulate the entity if you need to. If you need to manipulate (update) the entity, do not get it from the entity cache. Instead, read it from the repository, change it and update using the repository.
+## Getting Multiple Entities
+
+In addition to the single-entity methods `FindAsync` and `GetAsync`, the `IEntityCache` service provides batch methods to retrieve multiple entities at once.
+
+### List-Based Batch Retrieval
+
+`FindManyAsync` and `GetManyAsync` return results as a list, preserving the order of the given IDs (including duplicates):
+
+```csharp
+public class ProductAppService : ApplicationService, IProductAppService
+{
+    private readonly IEntityCache<ProductDto, Guid> _productCache;
+
+    public ProductAppService(IEntityCache<ProductDto, Guid> productCache)
+    {
+        _productCache = productCache;
+    }
+
+    public async Task<List<ProductDto>> GetManyAsync(List<Guid> ids)
+    {
+        return await _productCache.GetManyAsync(ids);
+    }
+
+    public async Task<List<ProductDto>> FindManyAsync(List<Guid> ids)
+    {
+        return await _productCache.FindManyAsync(ids);
+    }
+}
+```
+
+* `GetManyAsync` throws `EntityNotFoundException` if any entity is not found for the given IDs.
+* `FindManyAsync` returns a list where each entry corresponds to the given ID in the same order; an entry will be `null` if the entity was not found.
+
+### Dictionary-Based Batch Retrieval
+
+`FindManyAsDictionaryAsync` and `GetManyAsDictionaryAsync` return results as a dictionary keyed by entity ID, which is convenient when you need fast lookup by ID:
+
+```csharp
+public async Task<Dictionary<Guid, ProductDto>> FindManyAsDictionaryAsync(List<Guid> ids)
+{
+    return await _productCache.FindManyAsDictionaryAsync(ids);
+}
+
+public async Task<Dictionary<Guid, ProductDto>> GetManyAsDictionaryAsync(List<Guid> ids)
+{
+    return await _productCache.GetManyAsDictionaryAsync(ids);
+}
+```
+
+* `GetManyAsDictionaryAsync` throws `EntityNotFoundException` if any entity is not found for the given IDs.
+* `FindManyAsDictionaryAsync` returns a dictionary where the value is `null` if the entity was not found for the corresponding key.
+
+All batch methods internally use `IDistributedCache.GetOrAddManyAsync` to batch-fetch only the cache-missed entities from the database, making them more efficient than calling `FindAsync` or `GetAsync` in a loop.
+
+## Custom Object Mapping
+
+When you need full control over how an entity is mapped to a cache item, you can derive from `EntityCacheWithObjectMapper<TEntity, TEntityCacheItem, TKey>` and override the `MapToValue` method:
+
+First, define the cache item class:
+
+```csharp
+public class ProductCacheDto
+{
+ public Guid Id { get; set; }
+ public string Name { get; set; }
+ public decimal Price { get; set; }
+}
+```
+
+Then, derive from `EntityCacheWithObjectMapper` and override `MapToValue`:
+
+```csharp
+public class ProductEntityCache :
+    EntityCacheWithObjectMapper<Product, ProductCacheDto, Guid>
+{
+    public ProductEntityCache(
+        IReadOnlyRepository<Product, Guid> repository,
+        IDistributedCache<ProductCacheDto, Guid> cache,
+        IUnitOfWorkManager unitOfWorkManager,
+        IObjectMapper objectMapper)
+        : base(repository, cache, unitOfWorkManager, objectMapper)
+    {
+    }
+
+    protected override ProductCacheDto MapToValue(Product entity)
+    {
+        // Custom mapping logic here
+        return new ProductCacheDto
+        {
+            Id = entity.Id,
+            Name = entity.Name.ToUpperInvariant(),
+            Price = entity.Price
+        };
+    }
+}
+```
+
+Register your custom cache class in the `ConfigureServices` method of your [module class](../architecture/modularity/basics.md):
+
+```csharp
+context.Services.ReplaceEntityCache<Product, ProductCacheDto, Guid>(
+ new DistributedCacheEntryOptions
+ {
+ AbsoluteExpirationRelativeToNow = TimeSpan.FromMinutes(10)
+ });
+```
+
+> If no prior `AddEntityCache` registration exists for the same cache item type, `ReplaceEntityCache` will simply add the service instead of throwing an error.
+
## See Also
* [Distributed caching](../fundamentals/caching.md)
diff --git a/docs/en/framework/ui/maui/index.md b/docs/en/framework/ui/maui/index.md
index 547617bcae..2622bdc7e0 100644
--- a/docs/en/framework/ui/maui/index.md
+++ b/docs/en/framework/ui/maui/index.md
@@ -42,15 +42,29 @@ You can examine the [Users Page](#users-page) or any other pre-defined page to s
### Android
-If you get the following error when connecting to the emulator or a physical phone, you need to set up port mapping.
+If you get the following error when connecting to the emulator or a physical phone, you need to set up port mapping using the `adb` tool:
```
Cannot connect to the backend on localhost.
```
-Open a command line terminal and run the `adb reverse` command to expose a port on your Android device to a port on your computer. For example:
+**How to get and use `adb` tool:**
-`adb reverse tcp:44305 tcp:44305`
+- **Option 1: Install `adb` globally**
+ Download and install the [Android SDK Platform-Tools](https://developer.android.com/tools/releases/platform-tools) to get the [`adb`](https://developer.android.com/tools/adb) command-line tool.
+- **Option 2: Use Visual Studio’s built-in `adb` command prompt**
+ If you are using Visual Studio, you can access the `adb` command prompt directly from the IDE:
+  ![adb command prompt in Visual Studio](../../../images/adb-command-prompt.png)
+
+> For more information on setting up your environment for Android development and debugging, refer to the [Microsoft MAUI Android device setup guide](https://learn.microsoft.com/en-us/dotnet/maui/android/device/setup).
+
+**Port mapping command:**
+
+Once `adb` is available, run the following command in your terminal (or Visual Studio's `adb` command prompt) to map the backend port to your Android device:
+
+```bash
+adb reverse tcp:44305 tcp:44305
+```
> Replace `44305` with the port number your backend application is running on.
>
diff --git a/docs/en/images/adb-command-prompt.png b/docs/en/images/adb-command-prompt.png
new file mode 100644
index 0000000000..19eb84b90f
Binary files /dev/null and b/docs/en/images/adb-command-prompt.png differ
diff --git a/docs/en/modules/ai-management/index.md b/docs/en/modules/ai-management/index.md
index 38693fc366..a551d5beef 100644
--- a/docs/en/modules/ai-management/index.md
+++ b/docs/en/modules/ai-management/index.md
@@ -44,6 +44,16 @@ abp add-package Volo.AIManagement.OpenAI
abp add-package Volo.AIManagement.Ollama
```
+> [!IMPORTANT]
+> If you use Ollama, make sure the Ollama server is installed and running, and that the models referenced by your workspace are already available locally. Before configuring an Ollama workspace, pull the chat model and any embedding model you plan to use. For example:
+>
+> ```bash
+> ollama pull llama3.2
+> ollama pull nomic-embed-text
+> ```
+>
+> Replace the model names with the exact models you configure in the workspace. `nomic-embed-text` is an embedding-only model and can't be used as a chat model.
+
> [!TIP]
> You can install multiple provider packages to support different AI providers simultaneously in your workspaces.
@@ -308,6 +318,14 @@ RAG requires an **embedder** and a **vector store** to be configured on the work
* **Embedder**: Converts documents and queries into vector embeddings. You can use any provider that supports embedding generation (e.g., OpenAI `text-embedding-3-small`, Ollama `nomic-embed-text`).
* **Vector Store**: Stores and retrieves vector embeddings. Supported providers: **MongoDb**, **Pgvector**, and **Qdrant**.
+> [!IMPORTANT]
+> If the workspace uses Ollama for chat or embeddings, the configured model names must exist in the local Ollama instance first. For example, if you configure `ModelName = "llama3.2"` and `EmbedderModelName = "nomic-embed-text"`, pull both models before using the workspace:
+>
+> ```bash
+> ollama pull llama3.2
+> ollama pull nomic-embed-text
+> ```
+
### Configuring RAG on a Workspace
To enable RAG for a workspace, configure the embedder and vector store settings in the workspace edit page.
@@ -432,6 +450,67 @@ The options class also provides helper methods:
> [!NOTE]
> Adding new file extensions also requires a matching content extractor to be registered for document processing. The built-in extractors support `.txt`, `.md`, and `.pdf` files.
+#### Hosting-Level Upload Limits
+
+`WorkspaceDataSourceOptions.MaxFileSize` controls the module-level validation, but your hosting stack may reject large uploads before the request reaches AI Management. If you increase `MaxFileSize`, make sure the underlying server and proxy limits are also updated.
+
+Typical limits to review:
+
+* **ASP.NET Core form/multipart limit** (`FormOptions.MultipartBodyLengthLimit`)
+* **Kestrel request body limit** (`KestrelServerLimits.MaxRequestBodySize`)
+* **IIS request filtering limit** (`maxAllowedContentLength`)
+* **Reverse proxy limits** such as **Nginx** (`client_max_body_size`)
+
+Example ASP.NET Core configuration:
+
+```csharp
+using Microsoft.AspNetCore.Http.Features;
+
+public override void ConfigureServices(ServiceConfigurationContext context)
+{
+    Configure<WorkspaceDataSourceOptions>(options =>
+ {
+ options.MaxFileSize = 50 * 1024 * 1024;
+ });
+
+    Configure<FormOptions>(options =>
+ {
+ options.MultipartBodyLengthLimit = 50 * 1024 * 1024;
+ });
+}
+```
+
+```csharp
+builder.WebHost.ConfigureKestrel(options =>
+{
+ options.Limits.MaxRequestBodySize = 50 * 1024 * 1024;
+});
+```
+
+Example IIS configuration in `web.config`:
+
+```xml
+<configuration>
+  <system.webServer>
+    <security>
+      <requestFiltering>
+        <requestLimits maxAllowedContentLength="52428800" />
+      </requestFiltering>
+    </security>
+  </system.webServer>
+</configuration>
+```
+
+Example Nginx configuration:
+
+```nginx
+server {
+ client_max_body_size 50M;
+}
+```
+
+If you are hosting behind another proxy or gateway (for example Apache, YARP, Azure App Gateway, Cloudflare, or Kubernetes ingress), ensure its request-body limit is also greater than or equal to the configured `MaxFileSize`.
+
## Permissions
The AI Management module defines the following permissions:
diff --git a/docs/en/framework/infrastructure/operation-rate-limiting.md b/docs/en/modules/operation-rate-limiting.md
similarity index 62%
rename from docs/en/framework/infrastructure/operation-rate-limiting.md
rename to docs/en/modules/operation-rate-limiting.md
index 5208c2c959..f285793695 100644
--- a/docs/en/framework/infrastructure/operation-rate-limiting.md
+++ b/docs/en/modules/operation-rate-limiting.md
@@ -1,11 +1,13 @@
````json
//[doc-seo]
{
- "Description": "Learn how to use the Operation Rate Limiting module in ABP Framework to control the frequency of specific operations like SMS sending, login attempts, and resource-intensive tasks."
+ "Description": "Learn how to use the Operation Rate Limiting module (Pro) in ABP to control the frequency of specific operations like SMS sending, login attempts, and resource-intensive tasks."
}
````
-# Operation Rate Limiting
+# Operation Rate Limiting Module (Pro)
+
+> You must have an [ABP Team or a higher license](https://abp.io/pricing) to use this module.
ABP provides an operation rate limiting system that allows you to control the frequency of specific operations in your application. You may need operation rate limiting for several reasons:
@@ -15,15 +17,9 @@ ABP provides an operation rate limiting system that allows you to control the fr
> This is not for [ASP.NET Core's built-in rate limiting middleware](https://learn.microsoft.com/en-us/aspnet/core/performance/rate-limit) which works at the HTTP request pipeline level. This module works at the **application/domain code level** and is called explicitly from your services. See the [Combining with ASP.NET Core Rate Limiting](#combining-with-aspnet-core-rate-limiting) section for a comparison.
-## Installation
-
-You can open a command-line terminal and type the following command to install the [Volo.Abp.OperationRateLimiting](https://www.nuget.org/packages/Volo.Abp.OperationRateLimiting) package into your project:
+## How to Install
-````bash
-abp add-package Volo.Abp.OperationRateLimiting
-````
-
-> If you haven't done it yet, you first need to install the [ABP CLI](../../../cli).
+This module is used by the [Account (Pro)](account-pro.md) module internally and comes pre-installed in the latest [startup templates](../solution-templates). So, no need to manually install it.
## Quick Start
@@ -31,7 +27,7 @@ This section shows the basic usage of the operation rate limiting system with a
### Defining a Policy
-First, define a rate limiting policy in the `ConfigureServices` method of your [module class](../../architecture/modularity/basics.md):
+First, define a rate limiting policy in the `ConfigureServices` method of your [module class](../framework/architecture/modularity/basics.md):
````csharp
Configure<AbpOperationRateLimitingOptions>(options =>
@@ -62,7 +58,7 @@ public class SmsAppService : ApplicationService
_rateLimitChecker = rateLimitChecker;
}
- public async Task SendCodeAsync(string phoneNumber)
+ public virtual async Task SendCodeAsync(string phoneNumber)
{
await _rateLimitChecker.CheckAsync("SendSmsCode", phoneNumber);
@@ -78,9 +74,120 @@ public class SmsAppService : ApplicationService
That's the basic usage. The following sections explain each concept in detail.
+## Declarative Usage (Attribute)
+
+Instead of injecting `IOperationRateLimitingChecker` manually, you can use the `[OperationRateLimiting]` attribute to enforce a policy declaratively on Application Service methods or MVC Controller actions.
+
+> **Application Services** are handled by the ABP interceptor (built into the Domain layer).
+> **MVC Controllers** are handled by `AbpOperationRateLimitingActionFilter`, which is automatically registered when you reference the `Volo.Abp.OperationRateLimiting.AspNetCore` package.
+
+### Applying to an Application Service
+
+````csharp
+public class SmsAppService : ApplicationService
+{
+ [OperationRateLimiting("SendSmsCode")]
+ public virtual async Task SendCodeAsync([RateLimitingParameter] string phoneNumber)
+ {
+ // Rate limit is checked automatically before this line executes.
+ await _smsSender.SendAsync(phoneNumber, GenerateCode());
+ }
+}
+````
+
+### Applying to an MVC Controller
+
+````csharp
+[Route("api/account")]
+public class AccountController : AbpController
+{
+ [HttpPost("send-sms-code")]
+ [OperationRateLimiting("SendSmsCode")]
+ public async Task SendSmsCodeAsync([RateLimitingParameter] string phoneNumber)
+ {
+ // Rate limit is checked automatically before this line executes.
+ await _smsSender.SendAsync(phoneNumber, GenerateCode());
+ return Ok();
+ }
+}
+````
+
+### Resolving the Parameter Value
+
+The `[OperationRateLimiting]` attribute resolves `OperationRateLimitingContext.Parameter` automatically using the following priority order:
+
+1. **`[RateLimitingParameter]`** — a method parameter marked with this attribute. Its `ToString()` value is used as the partition key.
+2. **`IHasOperationRateLimitingParameter`** — a method parameter whose type implements this interface. The value returned by `GetPartitionParameter()` is used as the partition key.
+3. **`null`** — no parameter is resolved; suitable for policies that use `PartitionByCurrentUser`, `PartitionByClientIp`, etc.
+
+#### Using `[RateLimitingParameter]`
+
+Mark a single parameter to use its value as the partition key:
+
+````csharp
+[OperationRateLimiting("SendSmsCode")]
+public virtual async Task SendCodeAsync([RateLimitingParameter] string phoneNumber)
+{
+ // partition key = phoneNumber
+}
+````
+
+#### Using `IHasOperationRateLimitingParameter`
+
+Implement the interface on an input DTO when the partition key is a property of the DTO:
+
+````csharp
+public class SendSmsCodeInput : IHasOperationRateLimitingParameter
+{
+ public string PhoneNumber { get; set; }
+ public string Language { get; set; }
+
+ public string? GetPartitionParameter() => PhoneNumber;
+}
+````
+
+````csharp
+[OperationRateLimiting("SendSmsCode")]
+public virtual async Task SendCodeAsync(SendSmsCodeInput input)
+{
+ // partition key = input.GetPartitionParameter() = input.PhoneNumber
+}
+````
+
+#### No Partition Parameter
+
+If no parameter is marked and no DTO implements the interface, the policy is checked without a `Parameter` value. This is appropriate for policies that use `PartitionByCurrentUser`, `PartitionByClientIp`, or `PartitionByCurrentTenant`:
+
+````csharp
+// Policy uses PartitionByCurrentUser — no explicit parameter needed.
+[OperationRateLimiting("GenerateReport")]
+public virtual async Task GenerateMonthlyReportAsync()
+{
+ // Rate limit is checked per current user automatically.
+}
+````
+
+> If the method has parameters but none is resolved, a **warning log** is emitted to help you catch misconfigured usages early.
+
+### Applying to a Class
+
+You can also place `[OperationRateLimiting]` on the class to apply it to **all public methods** of that class:
+
+````csharp
+[OperationRateLimiting("MyServiceLimit")]
+public class MyAppService : ApplicationService
+{
+ public virtual async Task MethodAAsync([RateLimitingParameter] string key) { ... }
+
+ public virtual async Task MethodBAsync([RateLimitingParameter] string key) { ... }
+}
+````
+
+> A method-level attribute takes precedence over the class-level attribute.
+
## Defining Policies
-Policies are defined using `AbpOperationRateLimitingOptions` in the `ConfigureServices` method of your [module class](../../architecture/modularity/basics.md). Each policy has a unique name, one or more rules, and a partition strategy.
+Policies are defined using `AbpOperationRateLimitingOptions` in the `ConfigureServices` method of your [module class](../framework/architecture/modularity/basics.md). Each policy has a unique name, one or more rules, and a partition strategy.
### Single-Rule Policies
@@ -115,6 +222,78 @@ options.AddPolicy("Login", policy =>
> When multiple rules are present, the module uses a **two-phase check**: it first verifies all rules without incrementing counters, then increments only if all rules pass. This prevents wasted quota when one rule would block the request.
+### Overriding an Existing Policy
+
+If a reusable module (e.g., ABP's Account module) defines a policy with default rules, you have two ways to customize it in your own module's `ConfigureServices`.
+
+**Option 1 — Full replacement with `AddPolicy`:**
+
+Call `AddPolicy` with the same name. The last registration wins and completely replaces all rules:
+
+````csharp
+// In your application module — runs after the Account module
+Configure<AbpOperationRateLimitingOptions>(options =>
+{
+ options.AddPolicy("Account.SendPasswordResetCode", policy =>
+ {
+ // Replaces all rules defined by the Account module for this policy
+ policy.AddRule(rule => rule
+ .WithFixedWindow(TimeSpan.FromMinutes(5), maxCount: 3)
+ .PartitionByEmail());
+ });
+});
+````
+
+> `AddPolicy` stores policies in a dictionary keyed by name, so calling it again with the same name fully replaces the previous policy and all its rules.
+
+**Option 2 — Partial modification with `ConfigurePolicy`:**
+
+Use `ConfigurePolicy` to modify an existing policy without replacing it entirely. The builder is pre-populated with the existing rules, so you only need to express what changes:
+
+````csharp
+Configure<AbpOperationRateLimitingOptions>(options =>
+{
+ // Only override the error code, keeping the module's original rules
+ options.ConfigurePolicy("Account.SendPasswordResetCode", policy =>
+ {
+ policy.WithErrorCode("MyApp:SmsCodeLimit");
+ });
+});
+````
+
+You can also add a rule on top of the existing ones:
+
+````csharp
+options.ConfigurePolicy("Account.SendPasswordResetCode", policy =>
+{
+ // Keep the module's per-email rule and add a per-IP rule on top
+ policy.AddRule(rule => rule
+ .WithFixedWindow(TimeSpan.FromHours(1), maxCount: 20)
+ .PartitionByClientIp());
+});
+````
+
+Or clear all inherited rules first and define entirely new ones using `ClearRules()`:
+
+````csharp
+options.ConfigurePolicy("Account.SendPasswordResetCode", policy =>
+{
+ policy.ClearRules()
+ .WithFixedWindow(TimeSpan.FromMinutes(5), maxCount: 3)
+ .PartitionByEmail();
+});
+````
+
+`ConfigurePolicy` returns `AbpOperationRateLimitingOptions`, so you can chain multiple calls:
+
+````csharp
+options
+ .ConfigurePolicy("Account.SendPasswordResetCode", p => p.WithErrorCode("MyApp:SmsLimit"))
+ .ConfigurePolicy("Account.Login", p => p.WithErrorCode("MyApp:LoginLimit"));
+````
+
+> `ConfigurePolicy` throws `AbpException` if the policy name is not found. Use `AddPolicy` first (in the module that owns the policy), then `ConfigurePolicy` in downstream modules to customize it.
+
### Custom Error Code
By default, the exception uses the error code `Volo.Abp.OperationRateLimiting:010001`. You can override it per policy:
@@ -195,14 +374,59 @@ Works the same way as `PartitionByEmail`: resolves from `context.Parameter` firs
### Custom Partition (PartitionBy)
-You can provide a custom async function to generate the partition key. The async signature allows you to perform database queries or other I/O operations:
+You can register a named custom resolver to generate the partition key. The resolver is an async function, so you can perform database queries or other I/O operations. Because the resolver is stored by name (not as an anonymous delegate), it can be serialized and managed from a UI or database.
+
+**Step 1 — Register the resolver by name:**
+
+````csharp
+Configure<AbpOperationRateLimitingOptions>(options =>
+{
+ options.AddPartitionKeyResolver("ByDevice", ctx =>
+ Task.FromResult($"{ctx.Parameter}:{ctx.ExtraProperties["DeviceId"]}"));
+});
+````
+
+**Step 2 — Reference it in a policy:**
+
+````csharp
+policy.WithFixedWindow(TimeSpan.FromHours(1), maxCount: 100)
+ .PartitionBy("ByDevice");
+````
+
+You can also register and reference in one step (inline):
````csharp
policy.WithFixedWindow(TimeSpan.FromHours(1), maxCount: 100)
- .PartitionBy(ctx => Task.FromResult(
- $"{ctx.Parameter}:{ctx.ExtraProperties["DeviceId"]}"));
+ .PartitionBy("ByDevice", ctx =>
+ Task.FromResult($"{ctx.Parameter}:{ctx.ExtraProperties["DeviceId"]}"));
+````
+
+> If you call `PartitionBy("name")` with a resolver name that hasn't been registered, an exception is thrown at configuration time (not at runtime), so typos are caught early.
+
+To replace an existing resolver (e.g., in a downstream module), use `ReplacePartitionKeyResolver`:
+
+````csharp
+options.ReplacePartitionKeyResolver("ByDevice", ctx =>
+ Task.FromResult($"v2:{ctx.Parameter}:{ctx.ExtraProperties["DeviceId"]}"));
+````
+
+### Named Rules (WithName)
+
+By default, a rule's store key is derived from its `Duration`, `MaxCount`, and `PartitionType`. This means that if you change a rule's parameters (e.g., increase `maxCount` from 5 to 10), the counter resets because the key changes.
+
+To keep a stable key across parameter changes, give the rule a name:
+
+````csharp
+policy.AddRule(rule => rule
+ .WithName("HourlyLimit")
+ .WithFixedWindow(TimeSpan.FromHours(1), maxCount: 100)
+ .PartitionByCurrentUser());
````
+When a name is set, it is used as the store key instead of the content-based descriptor. This is particularly useful when rules are managed from a database or UI — changing the `maxCount` or `duration` will not reset existing counters.
+
+> Rule names must be unique within a policy. Duplicate names cause an exception at build time.
+
## Multi-Tenancy
By default, partition keys do not include tenant information — for partition types like `PartitionByParameter`, `PartitionByCurrentUser`, `PartitionByClientIp`, etc., counters are shared across tenants unless you call `WithMultiTenancy()`. Note that `PartitionByCurrentTenant()` is inherently per-tenant since the partition key is the tenant ID itself, and `PartitionByClientIp()` is typically kept global since the same IP should share a counter regardless of tenant.
@@ -434,7 +658,7 @@ This module and ASP.NET Core's built-in [rate limiting middleware](https://learn
|---|---|---|
| **Level** | HTTP request pipeline | Application/domain code |
| **Scope** | All incoming requests | Specific business operations |
-| **Usage** | Middleware (automatic) | Explicit `CheckAsync` calls |
+| **Usage** | Middleware (automatic) | `[OperationRateLimiting]` attribute or explicit `CheckAsync` calls |
| **Typical use** | API throttling, DDoS protection | Business logic limits (SMS, reports) |
A common pattern is to use ASP.NET Core middleware for broad API protection and this module for fine-grained business operation limits.
@@ -467,7 +691,7 @@ public class MyCustomStore : IOperationRateLimitingStore, ITransientDependency
}
````
-ABP's [dependency injection](../../fundamentals/dependency-injection.md) system will automatically use your implementation since it replaces the default one.
+ABP's [dependency injection](../framework/fundamentals/dependency-injection.md) system will automatically use your implementation since it replaces the default one.
### Custom Rule
@@ -485,8 +709,33 @@ Replace `IOperationRateLimitingFormatter` to customize how time durations are di
Replace `IOperationRateLimitingPolicyProvider` to load policies from a database or external configuration source instead of the in-memory options.
+When loading pre-built policies from an external source, use the `AddPolicy` overload that accepts an `OperationRateLimitingPolicy` object directly (bypassing the builder):
+
+````csharp
+options.AddPolicy(new OperationRateLimitingPolicy
+{
+ Name = "DynamicPolicy",
+ Rules =
+ [
+ new OperationRateLimitingRuleDefinition
+ {
+ Name = "HourlyLimit",
+ Duration = TimeSpan.FromHours(1),
+ MaxCount = 100,
+ PartitionType = OperationRateLimitingPartitionType.CurrentUser
+ }
+ ]
+});
+````
+
+To remove a policy (e.g., when it is deleted from the database), use `RemovePolicy`:
+
+````csharp
+options.RemovePolicy("DynamicPolicy");
+````
+
## See Also
* [ASP.NET Core Rate Limiting Middleware](https://learn.microsoft.com/en-us/aspnet/core/performance/rate-limit)
-* [Distributed Caching](../fundamentals/caching.md)
-* [Exception Handling](../fundamentals/exception-handling.md)
+* [Distributed Caching](../framework/fundamentals/caching.md)
+* [Exception Handling](../framework/fundamentals/exception-handling.md)
diff --git a/docs/en/package-version-changes.md b/docs/en/package-version-changes.md
index 3dad0d2f7b..02b76c3be3 100644
--- a/docs/en/package-version-changes.md
+++ b/docs/en/package-version-changes.md
@@ -1,5 +1,19 @@
# Package Version Changes
+## 10.3.0-rc.1
+
+| Package | Old Version | New Version | PR |
+|---------|-------------|-------------|-----|
+| Microsoft.IdentityModel.JsonWebTokens | 8.14.0 | 8.16.0 | #25068 |
+| Microsoft.IdentityModel.Protocols.OpenIdConnect | 8.14.0 | 8.16.0 | #25068 |
+| Microsoft.IdentityModel.Tokens | 8.14.0 | 8.16.0 | #25068 |
+| MongoDB.Driver | 3.7.0 | 3.7.1 | #25114 |
+| System.IdentityModel.Tokens.Jwt | 8.14.0 | 8.16.0 | #25068 |
+| TickerQ | 10.1.1 | 10.2.0 | #25091 |
+| TickerQ.Dashboard | 10.1.1 | 10.2.0 | #25091 |
+| TickerQ.EntityFrameworkCore | 10.1.1 | 10.2.0 | #25091 |
+| TickerQ.Utilities | 10.1.1 | 10.2.0 | #25091 |
+
## 10.3.0-preview
| Package | Old Version | New Version | PR |
diff --git a/framework/Volo.Abp.slnx b/framework/Volo.Abp.slnx
index 1e36f1d212..1302600c09 100644
--- a/framework/Volo.Abp.slnx
+++ b/framework/Volo.Abp.slnx
@@ -169,7 +169,6 @@
-
@@ -257,6 +256,5 @@
-
diff --git a/framework/src/Volo.Abp.AspNetCore.Components.WebAssembly/Volo/Abp/AspNetCore/Components/WebAssembly/AbpBlazorClientHttpMessageHandler.cs b/framework/src/Volo.Abp.AspNetCore.Components.WebAssembly/Volo/Abp/AspNetCore/Components/WebAssembly/AbpBlazorClientHttpMessageHandler.cs
index 88855e3fc5..5e1ce7545c 100644
--- a/framework/src/Volo.Abp.AspNetCore.Components.WebAssembly/Volo/Abp/AspNetCore/Components/WebAssembly/AbpBlazorClientHttpMessageHandler.cs
+++ b/framework/src/Volo.Abp.AspNetCore.Components.WebAssembly/Volo/Abp/AspNetCore/Components/WebAssembly/AbpBlazorClientHttpMessageHandler.cs
@@ -53,7 +53,10 @@ public class AbpBlazorClientHttpMessageHandler : DelegatingHandler, ITransientDe
options.Type = UiPageProgressType.Info;
});
- request.SetBrowserRequestStreamingEnabled(true);
+ if (request.RequestUri?.Scheme == Uri.UriSchemeHttps)
+ {
+ request.SetBrowserRequestStreamingEnabled(true);
+ }
await SetLanguageAsync(request, cancellationToken);
await SetAntiForgeryTokenAsync(request);
await SetTimeZoneAsync(request);
diff --git a/framework/src/Volo.Abp.AspNetCore.Mvc.UI.Bootstrap/TagHelpers/Form/AbpInputTagHelperService.cs b/framework/src/Volo.Abp.AspNetCore.Mvc.UI.Bootstrap/TagHelpers/Form/AbpInputTagHelperService.cs
index 1fb6f806e1..f40ff1030b 100644
--- a/framework/src/Volo.Abp.AspNetCore.Mvc.UI.Bootstrap/TagHelpers/Form/AbpInputTagHelperService.cs
+++ b/framework/src/Volo.Abp.AspNetCore.Mvc.UI.Bootstrap/TagHelpers/Form/AbpInputTagHelperService.cs
@@ -54,7 +54,7 @@ public class AbpInputTagHelperService : AbpTagHelperService
output.TagMode = TagMode.StartTagAndEndTag;
output.TagName = "div";
LeaveOnlyGroupAttributes(context, output);
- if (!IsOutputHidden(output))
+ if (!IsInputHidden(context))
{
if (TagHelper.FloatingLabel && !isCheckBox)
{
@@ -86,6 +86,7 @@ public class AbpInputTagHelperService : AbpTagHelperService
protected virtual async Task<(string, bool)> GetFormInputGroupAsHtmlAsync(TagHelperContext context, TagHelperOutput output)
{
var (inputTag, isCheckBox) = await GetInputTagHelperOutputAsync(context, output);
+ context.Items[nameof(IsOutputHidden)] = IsOutputHidden(inputTag);
var inputHtml = inputTag.Render(_encoder);
var label = await GetLabelAsHtmlAsync(context, output, inputTag, isCheckBox);
@@ -124,7 +125,8 @@ public class AbpInputTagHelperService : AbpTagHelperService
protected virtual string SurroundInnerHtmlAndGet(TagHelperContext context, TagHelperOutput output, string innerHtml, bool isCheckbox)
{
- var mb = TagHelper.AddMarginBottomClass ? (isCheckbox ? "mb-2" : "mb-3") : string.Empty;
+ var isHidden = IsInputHidden(context);
+ var mb = !isHidden && TagHelper.AddMarginBottomClass ? (isCheckbox ? "mb-2" : "mb-3") : string.Empty;
return "