@ -0,0 +1,406 @@ |
|||
#!/usr/bin/env python3 |
|||
""" |
|||
Comprehensive test suite for update_dependency_changes.py |
|||
|
|||
Tests cover: |
|||
- Basic update/add/remove scenarios |
|||
- Version revert scenarios |
|||
- Complex multi-step change sequences |
|||
- Edge cases and duplicate operations |
|||
- Document format validation |
|||
""" |
|||
|
|||
import sys |
|||
import os |
|||
sys.path.insert(0, os.path.dirname(__file__)) |
|||
|
|||
from update_dependency_changes import merge_changes, render_section |
|||
|
|||
|
|||
def test_update_then_revert():
    """A package updated A->B in PR1 and reverted B->A in PR2 must vanish."""
    print("Test 1: Update then revert")
    before = ({"PackageA": ("1.0.0", "2.0.0", "#1")}, {}, {})
    incoming = ({"PackageA": ("2.0.0", "1.0.0", "#2")}, {}, {})
    updated, added, removed = merge_changes(before, incoming)
    assert "PackageA" not in updated, f"Expected PackageA removed, got: {updated}"
    assert len(added) == 0 and len(removed) == 0
    print("✓ Passed: Package correctly removed from updates\n")
|||
|
|||
|
|||
def test_add_then_remove_same_version():
    """Add v1.0 in PR1 then remove v1.0 in PR2: the pair cancels out."""
    print("Test 2: Add then remove same version")
    before = ({}, {"PackageB": ("1.0.0", "#1")}, {})
    incoming = ({}, {}, {"PackageB": ("1.0.0", "#2")})
    updated, added, removed = merge_changes(before, incoming)
    assert "PackageB" not in added, f"Expected PackageB removed from added, got: {added}"
    assert "PackageB" not in removed, f"Expected PackageB removed from removed, got: {removed}"
    assert "PackageB" not in updated
    print("✓ Passed: Package correctly removed from all sections\n")
|||
|
|||
|
|||
def test_remove_then_add_same_version():
    """Remove v1.0 in PR1 then add v1.0 back in PR2: nothing remains."""
    print("Test 3: Remove then add same version")
    before = ({}, {}, {"PackageC": ("1.0.0", "#1")})
    incoming = ({}, {"PackageC": ("1.0.0", "#2")}, {})
    updated, added, removed = merge_changes(before, incoming)
    assert "PackageC" not in updated, f"Expected PackageC removed from updated, got: {updated}"
    assert "PackageC" not in added, f"Expected PackageC removed from added, got: {added}"
    assert "PackageC" not in removed, f"Expected PackageC removed from removed, got: {removed}"
    print("✓ Passed: Package correctly removed from all sections\n")
|||
|
|||
|
|||
def test_add_then_remove_different_version():
    """Add v1.0, then remove v2.0: record survives as a removal of v2.0."""
    print("Test 4: Add then remove different version")
    before = ({}, {"PackageD": ("1.0.0", "#1")}, {})
    incoming = ({}, {}, {"PackageD": ("2.0.0", "#2")})
    updated, added, removed = merge_changes(before, incoming)
    assert "PackageD" not in added, f"Expected PackageD removed from added, got: {added}"
    assert "PackageD" in removed, f"Expected PackageD in removed, got: {removed}"
    assert removed["PackageD"][0] == "2.0.0", f"Expected version 2.0.0, got: {removed['PackageD']}"
    print(f"✓ Passed: Package correctly tracked as removed with version {removed['PackageD'][0]}\n")
|||
|
|||
|
|||
def test_update_in_added():
    """Add v1.0 then update to v2.0: collapses to one update 1.0->2.0."""
    print("Test 5: Update a package that was added")
    before = ({}, {"PackageE": ("1.0.0", "#1")}, {})
    incoming = ({"PackageE": ("1.0.0", "2.0.0", "#2")}, {}, {})
    updated, added, removed = merge_changes(before, incoming)
    assert "PackageE" not in added, f"Expected PackageE removed from added, got: {added}"
    assert "PackageE" in updated, f"Expected PackageE in updated, got: {updated}"
    assert updated["PackageE"] == ("1.0.0", "2.0.0", "#1, #2"), \
        f"Expected ('1.0.0', '2.0.0', '#1, #2'), got: {updated['PackageE']}"
    print(f"✓ Passed: Package correctly converted to updated: {updated['PackageE']}\n")
|||
|
|||
|
|||
def test_multiple_updates():
    """Chained updates A->B then B->C collapse to the full range A->C."""
    print("Test 6: Multiple updates")
    before = ({"PackageF": ("1.0.0", "2.0.0", "#1")}, {}, {})
    incoming = ({"PackageF": ("2.0.0", "3.0.0", "#2")}, {}, {})
    updated, added, removed = merge_changes(before, incoming)
    assert "PackageF" in updated
    assert updated["PackageF"] == ("1.0.0", "3.0.0", "#1, #2"), \
        f"Expected ('1.0.0', '3.0.0', '#1, #2'), got: {updated['PackageF']}"
    print(f"✓ Passed: Package correctly shows full range: {updated['PackageF']}\n")
|||
|
|||
|
|||
def test_multiple_updates_back_to_original():
    """Updates 1->2->3 followed by 3->1 net out to no change at all."""
    print("Test 7: Multiple updates ending back at original version")
    # State after PR1 and PR2 have already merged.
    before = ({"PackageG": ("1.0.0", "3.0.0", "#1, #2")}, {}, {})
    # PR3 brings the version back to where it started.
    incoming = ({"PackageG": ("3.0.0", "1.0.0", "#3")}, {}, {})
    updated, added, removed = merge_changes(before, incoming)
    assert "PackageG" not in updated, f"Expected PackageG removed, got: {updated}"
    assert len(added) == 0 and len(removed) == 0
    print("✓ Passed: Package correctly removed (version returned to original)\n")
|||
|
|||
|
|||
def test_update_remove_add_same_version():
    """1->2->3, remove, then add v3 back: net effect is update 1->3."""
    print("Test 8: Update-Update-Remove-Add same version")
    # State after PR1..PR3: only a removal record (original was 1.0.0).
    before = ({}, {}, {"PackageH": ("1.0.0", "#1, #2, #3")})
    # PR4 re-adds the version that PR3 removed.
    incoming = ({}, {"PackageH": ("3.0.0", "#4")}, {})
    updated, added, removed = merge_changes(before, incoming)
    assert "PackageH" in updated, f"Expected PackageH in updated, got: updated={updated}, added={added}, removed={removed}"
    assert updated["PackageH"] == ("1.0.0", "3.0.0", "#1, #2, #3, #4"), \
        f"Expected ('1.0.0', '3.0.0', '#1, #2, #3, #4'), got: {updated['PackageH']}"
    print(f"✓ Passed: Package correctly shows as updated: {updated['PackageH']}\n")
|||
|
|||
|
|||
def test_update_remove_add_original_version():
    """1->2->3, remove, then add the original v1 back: everything cancels."""
    print("Test 9: Update-Update-Remove-Add original version")
    # State after PR1..PR3: removal record rooted at the original 1.0.0.
    before = ({}, {}, {"PackageI": ("1.0.0", "#1, #2, #3")})
    # PR4 restores the original version.
    incoming = ({}, {"PackageI": ("1.0.0", "#4")}, {})
    updated, added, removed = merge_changes(before, incoming)
    assert "PackageI" not in updated, f"Expected PackageI removed, got: updated={updated}"
    assert "PackageI" not in added, f"Expected PackageI removed, got: added={added}"
    assert "PackageI" not in removed, f"Expected PackageI removed, got: removed={removed}"
    print("✓ Passed: Package correctly removed (added back to original version)\n")
|||
|
|||
|
|||
def test_update_remove_add_different_version():
    """1->2->3, remove, then add brand-new v4: net effect is update 1->4."""
    print("Test 10: Update-Update-Remove-Add different version")
    # State after PR1..PR3 (original was 1.0.0).
    before = ({}, {}, {"PackageJ": ("1.0.0", "#1, #2, #3")})
    # PR4 introduces an entirely new version.
    incoming = ({}, {"PackageJ": ("4.0.0", "#4")}, {})
    updated, added, removed = merge_changes(before, incoming)
    assert "PackageJ" in updated, f"Expected PackageJ in updated, got: updated={updated}, added={added}, removed={removed}"
    assert updated["PackageJ"] == ("1.0.0", "4.0.0", "#1, #2, #3, #4"), \
        f"Expected ('1.0.0', '4.0.0', '#1, #2, #3, #4'), got: {updated['PackageJ']}"
    print(f"✓ Passed: Package correctly shows as updated: {updated['PackageJ']}\n")
|||
|
|||
|
|||
def test_add_update_remove():
    """Add v1, update to v2, remove v2: a removal rooted at v1 remains."""
    print("Test 11: Add-Update-Remove")
    # State after PR1 (add) and PR2 (update): shown as one update 1->2.
    before = ({"PackageK": ("1.0.0", "2.0.0", "#1, #2")}, {}, {})
    # PR3 removes the current version.
    incoming = ({}, {}, {"PackageK": ("2.0.0", "#3")})
    updated, added, removed = merge_changes(before, incoming)
    assert "PackageK" not in updated, f"Expected PackageK removed from updated, got: {updated}"
    assert "PackageK" not in added, f"Expected PackageK removed from added, got: {added}"
    assert "PackageK" in removed, f"Expected PackageK in removed, got: {removed}"
    # Removal must be attributed to the very first (pre-update) version.
    assert removed["PackageK"][0] == "1.0.0", f"Expected removed from 1.0.0, got: {removed['PackageK']}"
    print(f"✓ Passed: Package correctly shows as removed from original: {removed['PackageK']}\n")
|||
|
|||
|
|||
def test_add_remove_add_same_version():
    """Add v1, remove v1 (cancelled out), then add v1 again: plain add."""
    print("Test 12: Add-Remove-Add same version")
    # PR1 + PR2 cancelled each other, leaving an empty record set.
    before = ({}, {}, {})
    # PR3 adds the package once more.
    incoming = ({}, {"PackageL": ("1.0.0", "#3")}, {})
    updated, added, removed = merge_changes(before, incoming)
    assert "PackageL" in added, f"Expected PackageL in added, got: added={added}"
    assert added["PackageL"] == ("1.0.0", "#3"), f"Expected ('1.0.0', '#3'), got: {added['PackageL']}"
    print(f"✓ Passed: Package correctly shows as added: {added['PackageL']}\n")
|||
|
|||
|
|||
def test_update_remove_remove():
    """Duplicate removal must keep the original pre-removal version."""
    print("Test 13: Update-Remove (duplicate remove)")
    # State after PR1 and PR2: removal record rooted at original 1.0.0.
    before = ({}, {}, {"PackageM": ("1.0.0", "#1, #2")})
    # PR3 issues a second removal (unlikely, but must be handled).
    incoming = ({}, {}, {"PackageM": ("1.0.0", "#3")})
    updated, added, removed = merge_changes(before, incoming)
    assert "PackageM" in removed, f"Expected PackageM in removed, got: {removed}"
    # The original "removed from" version must be preserved.
    assert removed["PackageM"][0] == "1.0.0", f"Expected removed from 1.0.0, got: {removed['PackageM']}"
    print(f"✓ Passed: Package correctly maintains removed state: {removed['PackageM']}\n")
|||
|
|||
|
|||
def test_add_add():
    """Two add records for the same package keep the latest version."""
    print("Test 14: Add-Add (version changed between PRs)")
    # State after PR1.
    before = ({}, {"PackageN": ("1.0.0", "#1")}, {})
    # PR2 records an add at a different version (edge case).
    incoming = ({}, {"PackageN": ("2.0.0", "#2")}, {})
    updated, added, removed = merge_changes(before, incoming)
    assert "PackageN" in added, f"Expected PackageN in added, got: {added}"
    assert added["PackageN"][0] == "2.0.0", f"Expected version 2.0.0, got: {added['PackageN']}"
    print(f"✓ Passed: Package correctly shows latest added version: {added['PackageN']}\n")
|||
|
|||
|
|||
def test_complex_chain_ending_in_original():
    """A long add/update/remove/add chain that ends where it began cancels."""
    print("Test 15: Complex chain ending at nothing changed")
    # State after PR1 (add), PR2 (update), PR3 (remove), PR4 (add back).
    before = ({"PackageO": ("1.0.0", "2.0.0", "#1, #2, #3, #4")}, {}, {})
    # PR5 rolls the version back to the original 1.0.0.
    incoming = ({"PackageO": ("2.0.0", "1.0.0", "#5")}, {}, {})
    updated, added, removed = merge_changes(before, incoming)
    assert "PackageO" not in updated, f"Expected PackageO removed, got: {updated}"
    print("✓ Passed: Complex chain correctly removed when ending at original\n")
|||
|
|||
|
|||
def test_document_format():
    """Render a section and check all expected markdown landmarks appear."""
    print("Test 16: Document format validation")

    updated = {
        "Microsoft.Extensions.Logging": ("8.0.0", "8.0.1", "#123"),
        "Newtonsoft.Json": ("13.0.1", "13.0.3", "#456, #789"),
    }
    added = {"Azure.Identity": ("1.10.0", "#567")}
    removed = {"System.Text.Json": ("7.0.0", "#890")}

    document = render_section("9.0.0", updated, added, removed)

    # Every structural landmark must be present in the rendered markdown.
    landmarks = [
        ("## 9.0.0", "Version header missing"),
        ("| Package | Old Version | New Version | PR |", "Updated table header missing"),
        ("Microsoft.Extensions.Logging", "Updated package missing"),
        ("**Added:**", "Added section missing"),
        ("Azure.Identity", "Added package missing"),
        ("**Removed:**", "Removed section missing"),
        ("System.Text.Json", "Removed package missing"),
    ]
    for needle, message in landmarks:
        assert needle in document, message

    print("✓ Passed: Document format is correct")
    print("\nSample output:")
    print("-" * 60)
    print(document)
    print("-" * 60 + "\n")
|||
|
|||
|
|||
def run_all_tests():
    """Run every test case in order, then print a summary banner."""
    print("=" * 70)
    print("Testing update_dependency_changes.py")
    print("=" * 70 + "\n")

    # Each case raises AssertionError on failure, aborting the run.
    for case in (
        test_update_then_revert,
        test_add_then_remove_same_version,
        test_remove_then_add_same_version,
        test_add_then_remove_different_version,
        test_update_in_added,
        test_multiple_updates,
        test_multiple_updates_back_to_original,
        test_update_remove_add_same_version,
        test_update_remove_add_original_version,
        test_update_remove_add_different_version,
        test_add_update_remove,
        test_add_remove_add_same_version,
        test_update_remove_remove,
        test_add_add,
        test_complex_chain_ending_in_original,
        test_document_format,
    ):
        case()

    print("=" * 70)
    print("All 16 tests passed! ✓")
    print("=" * 70)
    print("\nTest coverage summary:")
    print("  ✓ Basic scenarios (update, add, remove)")
    print("  ✓ Version revert handling")
    print("  ✓ Complex multi-step sequences")
    print("  ✓ Edge cases and duplicates")
    print("  ✓ Document format validation")
    print("=" * 70)
|||
|
|||
|
|||
if __name__ == "__main__": |
|||
run_all_tests() |
|||
@ -0,0 +1,331 @@ |
|||
import subprocess |
|||
import re |
|||
import os |
|||
import sys |
|||
import xml.etree.ElementTree as ET |
|||
|
|||
|
|||
HEADER = "# Package Version Changes\n" |
|||
DOC_PATH = os.environ.get("DOC_PATH", "docs/en/package-version-changes.md") |
|||
|
|||
|
|||
def get_version():
    """Read the current <Version> value from common.props.

    Returns:
        The version string, or None when the file is missing, is not
        well-formed XML, or contains no <Version> element. Each failure
        case prints a diagnostic to stderr (previously the missing-element
        case failed silently).
    """
    try:
        root = ET.parse("common.props").getroot()
    except FileNotFoundError:
        print("Error: 'common.props' file not found.", file=sys.stderr)
    except ET.ParseError as ex:
        print(f"Error: Failed to parse 'common.props': {ex}", file=sys.stderr)
    else:
        version_elem = root.find(".//Version")
        if version_elem is not None:
            return version_elem.text
        # Previously this case returned None without any explanation.
        print("Error: No <Version> element found in 'common.props'.", file=sys.stderr)
    return None
|||
|
|||
|
|||
def get_diff(base_ref):
    """Return the git diff of Directory.Packages.props against origin/<base_ref>.

    Raises:
        RuntimeError: if git exits non-zero (e.g. unknown ref).
    """
    proc = subprocess.run(
        ["git", "diff", f"origin/{base_ref}", "--", "Directory.Packages.props"],
        capture_output=True,
        text=True,
    )
    if proc.returncode != 0:
        message = f"Failed to get diff for base ref 'origin/{base_ref}': {proc.stderr}"
        raise RuntimeError(message)
    return proc.stdout
|||
|
|||
|
|||
def get_existing_doc_from_base(base_ref):
    """Return the changelog document's content on origin/<base_ref>.

    Returns "" when the file does not exist on that branch (git show
    exits non-zero), so callers can treat the document as absent.
    """
    proc = subprocess.run(
        ["git", "show", f"origin/{base_ref}:{DOC_PATH}"],
        capture_output=True,
        text=True,
    )
    return proc.stdout if proc.returncode == 0 else ""
|||
|
|||
|
|||
def parse_diff_packages(lines, prefix):
    """Extract {package: version} from diff lines carrying *prefix* ('+' or '-').

    File-header lines ('+++' / '---', i.e. the prefix repeated) are skipped.
    Include and Version are matched with separate regexes so either
    attribute order inside the <PackageVersion .../> element works.
    """
    include_re = re.compile(r'Include="([^"]+)"')
    version_re = re.compile(r'Version="([^"]+)"')
    file_header = prefix * 3
    found = {}
    for line in lines:
        if not line.startswith(prefix) or line.startswith(file_header):
            continue
        if "PackageVersion" not in line:
            continue
        inc = include_re.search(line)
        ver = version_re.search(line)
        if inc and ver:
            found[inc.group(1)] = ver.group(1)
    return found
|||
|
|||
|
|||
def classify_changes(old_packages, new_packages, pr_number):
    """Classify an old/new package-version diff with PR attribution.

    Returns (updated, added, removed) where:
      updated: {pkg: (old_ver, new_ver, pr_number)} — present on both sides
               with differing versions;
      added:   {pkg: (ver, pr_number)} — present only on the new side;
      removed: {pkg: (ver, pr_number)} — present only on the old side.
    Packages are visited in sorted order for deterministic output.
    """
    updated, added, removed = {}, {}, {}

    for pkg in sorted(old_packages.keys() | new_packages.keys()):
        old_ver = old_packages.get(pkg)
        new_ver = new_packages.get(pkg)
        if old_ver is not None and new_ver is not None:
            if old_ver != new_ver:
                updated[pkg] = (old_ver, new_ver, pr_number)
        elif new_ver is not None:
            added[pkg] = (new_ver, pr_number)
        else:
            removed[pkg] = (old_ver, pr_number)

    return updated, added, removed
|||
|
|||
|
|||
def parse_existing_section(section_text):
    """Parse a rendered markdown version section back into change dicts.

    Walks the section line by line, switching parsing mode when the
    '**Added:**' / '**Removed:**' markers appear (initial mode is
    'updated'). Table header and separator rows are ignored.

    Returns (updated, added, removed) in the same shapes produced by
    classify_changes / merge_changes.
    """
    updated, added, removed = {}, {}, {}
    mode = "updated"

    for line in section_text.split("\n"):
        if "**Added:**" in line:
            mode = "added"
        elif "**Removed:**" in line:
            mode = "removed"
        elif line.startswith("|") and not (
            line.startswith("| Package") or line.startswith("|---")
        ):
            cells = [cell.strip() for cell in line.split("|")[1:-1]]
            if mode == "updated" and len(cells) >= 3:
                pr = cells[3] if len(cells) >= 4 else ""
                updated[cells[0]] = (cells[1], cells[2], pr)
            elif len(cells) >= 2:
                pr = cells[2] if len(cells) >= 3 else ""
                # Rows outside 'added' mode land in 'removed', mirroring
                # the two-column table layout.
                target = added if mode == "added" else removed
                target[cells[0]] = (cells[1], pr)

    return updated, added, removed
|||
|
|||
|
|||
def merge_prs(existing_pr, new_pr):
    """Merge two comma-separated PR-number strings without duplicates.

    Order is preserved: existing PRs first, then any new PRs not already
    present. Either argument may be empty/blank, in which case the other
    is returned unchanged.

    Generalized from the original: *new_pr* may itself be a comma-separated
    list (previously it was treated as a single opaque token, so a
    multi-PR new string could reintroduce duplicates).
    """
    existing_prs = [p.strip() for p in (existing_pr or "").split(",") if p.strip()]
    new_prs = [p.strip() for p in (new_pr or "").split(",") if p.strip()]

    if not existing_prs:
        return new_pr
    if not new_prs:
        return existing_pr

    for pr in new_prs:
        if pr not in existing_prs:
            existing_prs.append(pr)
    return ", ".join(existing_prs)
|||
|
|||
|
|||
def merge_changes(existing, new):
    """Merge a new PR's changes into the accumulated records for a version.

    Both *existing* and *new* are (updated, added, removed) triples:
      updated: {pkg: (old_ver, new_ver, pr)}
      added:   {pkg: (ver, pr)}
      removed: {pkg: (ver, pr)}

    Returns the merged (updated, added, removed) triple. Sequences that
    cancel out (update-then-revert, add-then-remove of the same version)
    are dropped entirely.

    Bugfix vs the original: a duplicate removal (package already recorded
    as removed) used to overwrite the record, losing the original
    pre-removal version and the accumulated PR numbers; it now preserves
    both.
    """

    def _join_prs(existing_pr, new_pr):
        # Local PR-list merge (same semantics as module-level merge_prs)
        # so this function is self-contained.
        if not existing_pr or not existing_pr.strip():
            return new_pr
        if not new_pr or not new_pr.strip():
            return existing_pr
        prs = [p.strip() for p in existing_pr.split(",") if p.strip()]
        if new_pr not in prs:
            prs.append(new_pr)
        return ", ".join(prs)

    ex_updated, ex_added, ex_removed = existing
    new_updated, new_added, new_removed = new

    merged_updated = dict(ex_updated)
    merged_added = dict(ex_added)
    merged_removed = dict(ex_removed)

    for pkg, (old_ver, new_ver, pr) in new_updated.items():
        if pkg in merged_updated:
            # Chain of updates: keep the original "from" version.
            existing_old_ver, _, existing_pr = merged_updated[pkg]
            merged_updated[pkg] = (existing_old_ver, new_ver, _join_prs(existing_pr, pr))
        elif pkg in merged_added:
            # Added then updated: promote to an update from the added version.
            existing_ver, existing_pr = merged_added.pop(pkg)
            merged_updated[pkg] = (existing_ver, new_ver, _join_prs(existing_pr, pr))
        else:
            merged_updated[pkg] = (old_ver, new_ver, pr)

    for pkg, (ver, pr) in new_added.items():
        if pkg in merged_removed:
            # Removed then re-added: record as an update from the original
            # version; the no-op filter below drops it if versions match.
            removed_ver, removed_pr = merged_removed.pop(pkg)
            merged_updated[pkg] = (removed_ver, ver, _join_prs(removed_pr, pr))
        elif pkg in merged_added:
            # Added twice: keep the latest version, accumulate PRs.
            _, existing_pr = merged_added[pkg]
            merged_added[pkg] = (ver, _join_prs(existing_pr, pr))
        else:
            merged_added[pkg] = (ver, pr)

    for pkg, (ver, pr) in new_removed.items():
        if pkg in merged_added:
            existing_ver, existing_pr = merged_added.pop(pkg)
            if existing_ver != ver:
                # Version drifted between add and remove; keep the removal.
                merged_removed[pkg] = (ver, _join_prs(existing_pr, pr))
            # else: add/remove of the same version cancels out entirely.
        elif pkg in merged_updated:
            # Updated then removed: attribute removal to the pre-update version.
            old_ver, _, existing_pr = merged_updated.pop(pkg)
            merged_removed[pkg] = (old_ver, _join_prs(existing_pr, pr))
        elif pkg in merged_removed:
            # BUGFIX: duplicate removal — keep the original pre-removal
            # version and accumulate PR numbers instead of clobbering.
            prev_ver, prev_pr = merged_removed[pkg]
            merged_removed[pkg] = (prev_ver, _join_prs(prev_pr, pr))
        else:
            merged_removed[pkg] = (ver, pr)

    # Drop no-op updates (version reverted to the original).
    merged_updated = {k: v for k, v in merged_updated.items() if v[0] != v[1]}

    # Cancel add/remove pairs recorded at the same version.
    for pkg in list(merged_added):
        if pkg in merged_removed and merged_added[pkg][0] == merged_removed[pkg][0]:
            del merged_added[pkg]
            del merged_removed[pkg]

    return merged_updated, merged_added, merged_removed
|||
|
|||
|
|||
def render_section(version, updated, added, removed):
    """Render one '## <version>' markdown section from the change dicts.

    Emits an updated-packages table (four columns), then optional
    '**Added:**' and '**Removed:**' two-column tables. Empty dicts
    produce no table. Packages are listed alphabetically.
    """
    out = [f"## {version}\n"]

    def _emit_two_column(title, records):
        # Shared layout for the Added/Removed tables.
        out.append(f"**{title}:**\n")
        out.append("| Package | Version | PR |")
        out.append("|---------|---------|-----|")
        for name in sorted(records):
            ver, pr = records[name]
            out.append(f"| {name} | {ver} | {pr} |")
        out.append("")

    if updated:
        out.append("| Package | Old Version | New Version | PR |")
        out.append("|---------|-------------|-------------|-----|")
        for name in sorted(updated):
            old_ver, new_ver, pr = updated[name]
            out.append(f"| {name} | {old_ver} | {new_ver} | {pr} |")
        out.append("")

    if added:
        _emit_two_column("Added", added)

    if removed:
        _emit_two_column("Removed", removed)

    return "\n".join(out)
|||
|
|||
|
|||
def parse_document(content):
    """Split the changelog document into ordered (version, section_text) tuples.

    A section starts at each '## <version>' heading and runs until the
    next heading (or end of document). Text before the first heading
    (title, preamble) is discarded.
    """
    heading_re = re.compile(r"^## (.+)$")
    sections = []
    version = None
    buf = []

    for line in content.split("\n"):
        match = heading_re.match(line)
        if match:
            if version:
                sections.append((version, "\n".join(buf)))
            version = match.group(1).strip()
            buf = [line]
        elif version:
            buf.append(line)

    if version:
        sections.append((version, "\n".join(buf)))

    return sections
|||
|
|||
|
|||
def main():
    """CLI entry point: update the package-changes doc for the current PR.

    Usage: update_dependency_changes.py <base-ref> <pr-number>

    Diffs Directory.Packages.props against origin/<base-ref>, classifies
    the package changes, merges them into the existing section for the
    current product version (read from common.props), and rewrites the
    document at DOC_PATH. Exits 1 on bad arguments or missing version,
    0 (successfully doing nothing) when there is no diff or no changes.
    """
    if len(sys.argv) < 3:
        print("Usage: update_dependency_changes.py <base-ref> <pr-number>")
        sys.exit(1)

    base_ref = sys.argv[1]
    pr_arg = sys.argv[2]

    # Validate PR number is numeric
    if not re.fullmatch(r"\d+", pr_arg):
        print("Invalid PR number; must be numeric.")
        sys.exit(1)

    # Validate base_ref doesn't contain dangerous characters
    # (it is interpolated into git command arguments below).
    if not re.fullmatch(r"[a-zA-Z0-9/_.-]+", base_ref):
        print("Invalid base ref; contains invalid characters.")
        sys.exit(1)

    pr_number = f"#{pr_arg}"

    version = get_version()
    if not version:
        print("Could not read version from common.props.")
        sys.exit(1)

    diff = get_diff(base_ref)
    if not diff:
        # No diff at all for the props file; nothing to document.
        print("No diff found for Directory.Packages.props.")
        sys.exit(0)

    # '-' lines hold the base-branch versions, '+' lines the PR's versions.
    diff_lines = diff.split("\n")
    old_packages = parse_diff_packages(diff_lines, "-")
    new_packages = parse_diff_packages(diff_lines, "+")

    new_updated, new_added, new_removed = classify_changes(old_packages, new_packages, pr_number)

    if not new_updated and not new_added and not new_removed:
        # Diff touched the file but no version values actually changed.
        print("No package version changes detected.")
        sys.exit(0)

    # Load existing document from the base branch
    existing_content = get_existing_doc_from_base(base_ref)
    sections = parse_document(existing_content) if existing_content else []

    # Find existing section for this version
    version_index = None
    for i, (v, _) in enumerate(sections):
        if v == version:
            version_index = i
            break

    if version_index is not None:
        # Merge this PR's changes into the version's accumulated records.
        existing = parse_existing_section(sections[version_index][1])
        merged = merge_changes(existing, (new_updated, new_added, new_removed))
        section_text = render_section(version, *merged)
        sections[version_index] = (version, section_text)
    else:
        # First changes for this version: new section goes to the top.
        section_text = render_section(version, new_updated, new_added, new_removed)
        sections.insert(0, (version, section_text))

    # Write document
    doc_dir = os.path.dirname(DOC_PATH)
    if doc_dir:
        os.makedirs(doc_dir, exist_ok=True)
    with open(DOC_PATH, "w") as f:
        f.write(HEADER + "\n")
        for _, text in sections:
            # Normalize to exactly one blank line between sections.
            f.write(text.rstrip("\n") + "\n\n")

    print(f"Updated {DOC_PATH} for version {version}")
|||
|
|||
|
|||
if __name__ == "__main__": |
|||
main() |
|||
@ -0,0 +1,71 @@ |
|||
# Automatically detects and documents NuGet package version changes in PRs.
# Triggers on changes to Directory.Packages.props and:
# - Adds 'dependency-change' label to the PR
# - Updates docs/en/package-version-changes.md with version changes
# - Commits the documentation back to the PR branch
# Note: Only runs for PRs from the same repository (not forks) to ensure write permissions.
name: Nuget Packages Version Change Detector

on:
  pull_request:
    paths:
      - 'Directory.Packages.props'
    types:
      - opened
      - synchronize
      - reopened
      - ready_for_review

# Default is read-only; the job below elevates only what it needs.
permissions:
  contents: read

# One run per PR; queued runs are not cancelled so every push is processed.
concurrency:
  group: dependency-changes-${{ github.event.pull_request.number }}
  cancel-in-progress: false

jobs:
  label:
    # Skip drafts, auto-merge branches, and fork PRs (no write access).
    # NOTE(review): github.event.head_commit is only populated for push
    # events, not pull_request — the contains(...) clause below always
    # evaluates over an empty value and is effectively inert; confirm
    # whether the [skip ci] guard was intended to read the PR head commit.
    if: ${{ !github.event.pull_request.draft && !startsWith(github.head_ref, 'auto-merge/') && github.event.pull_request.head.repo.full_name == github.repository && !contains(github.event.head_commit.message, '[skip ci]') }}
    permissions:
      contents: write
      pull-requests: write
    runs-on: ubuntu-latest
    env:
      DOC_PATH: docs/en/package-version-changes.md
    steps:
      # Label first so the PR is tagged even if later steps fail.
      - run: gh pr edit "$PR_NUMBER" --add-label "dependency-change"
        env:
          PR_NUMBER: ${{ github.event.pull_request.number }}
          GH_TOKEN: ${{ secrets.BOT_SECRET }}
          GH_REPO: ${{ github.repository }}

      # Check out the PR head branch so the doc commit lands on it.
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.event.pull_request.head.ref }}
          fetch-depth: 1

      # The update script diffs against origin/<base>, so fetch it shallowly.
      - name: Fetch base branch
        run: git fetch origin ${{ github.event.pull_request.base.ref }}:refs/remotes/origin/${{ github.event.pull_request.base.ref }} --depth=1

      - uses: actions/setup-python@v5
        with:
          python-version: '3.x'

      # base.ref and number are validated again inside the script.
      - run: python .github/scripts/update_dependency_changes.py ${{ github.event.pull_request.base.ref }} ${{ github.event.pull_request.number }}

      # Commit the regenerated doc back to the PR branch; '[skip ci]'
      # prevents this push from re-triggering CI.
      - name: Commit changes
        run: |
          set -e
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git add "$DOC_PATH"
          if git diff --staged --quiet; then
            echo "No changes to commit."
          else
            git commit -m "docs: update package version changes [skip ci]"
            if ! git push; then
              echo "Error: Failed to push changes. This may be due to conflicts or permission issues."
              exit 1
            fi
            echo "Successfully committed and pushed documentation changes."
          fi
@ -0,0 +1,658 @@ |
|||
name: Update ABP Studio Docs |
|||
|
|||
on: |
|||
repository_dispatch: |
|||
types: [update_studio_docs] |
|||
workflow_dispatch: |
|||
inputs: |
|||
version: |
|||
description: 'Studio version (e.g., 2.1.10)' |
|||
required: true |
|||
name: |
|||
description: 'Release name' |
|||
required: true |
|||
notes: |
|||
description: 'Raw release notes' |
|||
required: true |
|||
url: |
|||
description: 'Release URL' |
|||
required: true |
|||
target_branch: |
|||
description: 'Target branch (default: dev)' |
|||
required: false |
|||
default: 'dev' |
|||
|
|||
jobs: |
|||
update-docs: |
|||
runs-on: ubuntu-latest |
|||
permissions: |
|||
contents: write |
|||
pull-requests: write |
|||
models: read |
|||
|
|||
steps: |
|||
# ------------------------------------------------- |
|||
# Extract payload (repository_dispatch or workflow_dispatch) |
|||
# ------------------------------------------------- |
|||
- name: Extract payload |
|||
id: payload |
|||
run: | |
|||
if [ "${{ github.event_name }}" = "repository_dispatch" ]; then |
|||
echo "version=${{ github.event.client_payload.version }}" >> $GITHUB_OUTPUT |
|||
echo "name=${{ github.event.client_payload.name }}" >> $GITHUB_OUTPUT |
|||
echo "url=${{ github.event.client_payload.url }}" >> $GITHUB_OUTPUT |
|||
echo "target_branch=${{ github.event.client_payload.target_branch || 'dev' }}" >> $GITHUB_OUTPUT |
|||
|
|||
# Save notes to environment variable (multiline) |
|||
{ |
|||
echo "RAW_NOTES<<NOTES_DELIMITER_EOF" |
|||
jq -r '.client_payload.notes' "$GITHUB_EVENT_PATH" |
|||
echo "NOTES_DELIMITER_EOF" |
|||
} >> $GITHUB_ENV |
|||
else |
|||
echo "version=${{ github.event.inputs.version }}" >> $GITHUB_OUTPUT |
|||
echo "name=${{ github.event.inputs.name }}" >> $GITHUB_OUTPUT |
|||
echo "url=${{ github.event.inputs.url }}" >> $GITHUB_OUTPUT |
|||
echo "target_branch=${{ github.event.inputs.target_branch || 'dev' }}" >> $GITHUB_OUTPUT |
|||
|
|||
# Save notes to environment variable (multiline) |
|||
{ |
|||
echo "RAW_NOTES<<NOTES_DELIMITER_EOF" |
|||
echo "${{ github.event.inputs.notes }}" |
|||
echo "NOTES_DELIMITER_EOF" |
|||
} >> $GITHUB_ENV |
|||
fi |
|||
|
|||
- name: Validate payload |
|||
env: |
|||
VERSION: ${{ steps.payload.outputs.version }} |
|||
NAME: ${{ steps.payload.outputs.name }} |
|||
URL: ${{ steps.payload.outputs.url }} |
|||
TARGET_BRANCH: ${{ steps.payload.outputs.target_branch }} |
|||
run: | |
|||
if [ -z "$VERSION" ] || [ "$VERSION" = "null" ]; then |
|||
echo "❌ Missing: version" |
|||
exit 1 |
|||
fi |
|||
if [ -z "$NAME" ] || [ "$NAME" = "null" ]; then |
|||
echo "❌ Missing: name" |
|||
exit 1 |
|||
fi |
|||
if [ -z "$URL" ] || [ "$URL" = "null" ]; then |
|||
echo "❌ Missing: url" |
|||
exit 1 |
|||
fi |
|||
if [ -z "$RAW_NOTES" ]; then |
|||
echo "❌ Missing: release notes" |
|||
exit 1 |
|||
fi |
|||
|
|||
echo "✅ Payload validated" |
|||
echo " Version: $VERSION" |
|||
echo " Name: $NAME" |
|||
echo " Target Branch: $TARGET_BRANCH" |
|||
|
|||
# ------------------------------------------------- |
|||
# Checkout target branch |
|||
# ------------------------------------------------- |
|||
- name: Checkout |
|||
uses: actions/checkout@v4 |
|||
with: |
|||
ref: ${{ steps.payload.outputs.target_branch }} |
|||
fetch-depth: 0 |
|||
|
|||
- name: Configure git |
|||
run: | |
|||
git config user.name "github-actions[bot]" |
|||
git config user.email "github-actions[bot]@users.noreply.github.com" |
|||
|
|||
# ------------------------------------------------- |
|||
# Create working branch |
|||
# ------------------------------------------------- |
|||
- name: Create branch |
|||
env: |
|||
VERSION: ${{ steps.payload.outputs.version }} |
|||
run: | |
|||
BRANCH="docs/studio-${VERSION}" |
|||
|
|||
# Delete remote branch if exists (idempotent) |
|||
git push origin --delete "$BRANCH" 2>/dev/null || true |
|||
|
|||
git checkout -B "$BRANCH" |
|||
echo "BRANCH=$BRANCH" >> $GITHUB_ENV |
|||
|
|||
# ------------------------------------------------- |
|||
# Analyze existing release notes format |
|||
# ------------------------------------------------- |
|||
- name: Analyze existing format |
|||
id: analyze |
|||
run: | |
|||
FILE="docs/en/studio/release-notes.md" |
|||
|
|||
if [ -f "$FILE" ] && [ -s "$FILE" ]; then |
|||
{ |
|||
echo "EXISTING_FORMAT<<DELIMITER_EOF" |
|||
head -50 "$FILE" | sed 's/DELIMITER_EOF/DELIMITER_E0F/g' |
|||
echo "DELIMITER_EOF" |
|||
} >> $GITHUB_OUTPUT |
|||
else |
|||
{ |
|||
echo "EXISTING_FORMAT<<DELIMITER_EOF" |
|||
echo "# ABP Studio Release Notes" |
|||
echo "" |
|||
echo "## 2.1.0 (2025-12-08) Latest" |
|||
echo "- Enhanced Module Installation UI" |
|||
echo "- Added AI Management option" |
|||
echo "DELIMITER_EOF" |
|||
} >> $GITHUB_OUTPUT |
|||
fi |
|||
|
|||
# ------------------------------------------------- |
|||
# Try AI formatting (OPTIONAL - never fails workflow) |
|||
# ------------------------------------------------- |
|||
- name: Format release notes with AI |
|||
id: ai |
|||
continue-on-error: true |
|||
uses: actions/ai-inference@v1 |
|||
with: |
|||
model: openai/gpt-4.1 |
|||
prompt: | |
|||
You are a technical writer for ABP Studio release notes. |
|||
|
|||
Existing release notes format: |
|||
${{ steps.analyze.outputs.EXISTING_FORMAT }} |
|||
|
|||
New release: |
|||
Version: ${{ steps.payload.outputs.version }} |
|||
Name: ${{ steps.payload.outputs.name }} |
|||
Raw notes: |
|||
${{ env.RAW_NOTES }} |
|||
|
|||
CRITICAL RULES: |
|||
1. Extract ONLY essential, user-facing changes |
|||
2. Format as bullet points starting with "- " |
|||
3. Keep it concise and professional |
|||
4. Match the style of existing release notes |
|||
5. Skip internal/technical details unless critical |
|||
6. Return ONLY the bullet points (no version header, no date) |
|||
7. One change per line |
|||
|
|||
Output example: |
|||
- Fixed books sample for blazor-webapp tiered solution |
|||
- Enhanced Module Installation UI |
|||
- Added AI Management option to Startup Templates |
|||
|
|||
Return ONLY the formatted bullet points. |
|||
|
|||
# ------------------------------------------------- |
|||
# Fallback: Use raw notes if AI unavailable |
|||
# ------------------------------------------------- |
|||
- name: Prepare final release notes |
|||
run: | |
|||
mkdir -p .tmp |
|||
|
|||
AI_RESPONSE="${{ steps.ai.outputs.response }}" |
|||
|
|||
if [ -n "$AI_RESPONSE" ] && [ "$AI_RESPONSE" != "null" ]; then |
|||
echo "✅ Using AI-formatted release notes" |
|||
echo "$AI_RESPONSE" > .tmp/final-notes.txt |
|||
else |
|||
echo "⚠️ AI unavailable - using aggressive cleaning on raw release notes" |
|||
|
|||
# Clean and format raw notes with aggressive filtering |
|||
echo "$RAW_NOTES" | while IFS= read -r line; do |
|||
# Skip empty lines |
|||
[ -z "$line" ] && continue |
|||
|
|||
# Skip section headers |
|||
[[ "$line" =~ ^#+.*What.*Changed ]] && continue |
|||
[[ "$line" =~ ^##[[:space:]] ]] && continue |
|||
|
|||
# Skip full changelog links |
|||
[[ "$line" =~ ^\*\*Full\ Changelog ]] && continue |
|||
[[ "$line" =~ ^Full\ Changelog ]] && continue |
|||
|
|||
# Remove leading bullet/asterisk |
|||
line=$(echo "$line" | sed 's/^[[:space:]]*[*-][[:space:]]*//') |
|||
|
|||
# Aggressive cleaning: remove entire " by @user in https://..." suffix |
|||
line=$(echo "$line" | sed 's/[[:space:]]*by @[a-zA-Z0-9_-]*[[:space:]]*in https:\/\/github\.com\/[^[:space:]]*//g') |
|||
|
|||
# Remove remaining "by @username" or "by username" |
|||
line=$(echo "$line" | sed 's/[[:space:]]*by @[a-zA-Z0-9_-]*[[:space:]]*$//g') |
|||
line=$(echo "$line" | sed 's/[[:space:]]*by [a-zA-Z0-9_-]*[[:space:]]*$//g') |
|||
|
|||
# Remove standalone @mentions |
|||
line=$(echo "$line" | sed 's/@[a-zA-Z0-9_-]*//g') |
|||
|
|||
# Clean trailing periods if orphaned |
|||
line=$(echo "$line" | sed 's/\.[[:space:]]*$//') |
|||
|
|||
# Trim all whitespace |
|||
line=$(echo "$line" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') |
|||
|
|||
# Skip if line is empty or too short |
|||
[ -z "$line" ] && continue |
|||
[ ${#line} -lt 5 ] && continue |
|||
|
|||
# Capitalize first letter if lowercase |
|||
line="$(echo ${line:0:1} | tr '[:lower:]' '[:upper:]')${line:1}" |
|||
|
|||
# Add clean bullet and output |
|||
echo "- $line" |
|||
done > .tmp/final-notes.txt |
|||
fi |
|||
|
|||
# Safety check: verify we have content |
|||
if [ ! -s .tmp/final-notes.txt ]; then |
|||
echo "⚠️ No valid release notes extracted, using minimal fallback" |
|||
echo "- Release ${{ steps.payload.outputs.version }}" > .tmp/final-notes.txt |
|||
fi |
|||
|
|||
echo "=== Final release notes ===" |
|||
cat .tmp/final-notes.txt |
|||
echo "===========================" |
|||
|
|||
# ------------------------------------------------- |
|||
# Update release-notes.md (move "Latest" tag correctly) |
|||
# ------------------------------------------------- |
|||
- name: Update release-notes.md |
|||
env: |
|||
VERSION: ${{ steps.payload.outputs.version }} |
|||
NAME: ${{ steps.payload.outputs.name }} |
|||
URL: ${{ steps.payload.outputs.url }} |
|||
run: | |
|||
FILE="docs/en/studio/release-notes.md" |
|||
DATE="$(date +%Y-%m-%d)" |
|||
|
|||
mkdir -p docs/en/studio |
|||
|
|||
# Check if version already exists (idempotent) |
|||
if [ -f "$FILE" ] && grep -q "^## $VERSION " "$FILE"; then |
|||
echo "⚠️ Version $VERSION already exists in release notes - skipping update" |
|||
echo "VERSION_UPDATED=false" >> $GITHUB_ENV |
|||
exit 0 |
|||
fi |
|||
|
|||
# Read final notes |
|||
NOTES_CONTENT="$(cat .tmp/final-notes.txt)" |
|||
|
|||
# Create new entry |
|||
NEW_ENTRY="## $VERSION ($DATE) Latest |
|||
|
|||
$NOTES_CONTENT |
|||
" |
|||
|
|||
# Process file |
|||
if [ ! -f "$FILE" ]; then |
|||
# Create new file |
|||
cat > "$FILE" <<EOF |
|||
# ABP Studio Release Notes |
|||
|
|||
$NEW_ENTRY |
|||
EOF |
|||
else |
|||
# Remove "Latest" tag from existing entries and insert new one |
|||
awk -v new_entry="$NEW_ENTRY" ' |
|||
BEGIN { inserted = 0 } |
|||
|
|||
# Remove "Latest" from existing entries |
|||
/^## [0-9]/ { |
|||
gsub(/ Latest$/, "", $0) |
|||
} |
|||
|
|||
# Insert after first "## " (version heading) or after title |
|||
/^## / && !inserted { |
|||
print new_entry |
|||
inserted = 1 |
|||
} |
|||
|
|||
# Print current line |
|||
{ print } |
|||
|
|||
# If we reach end without inserting, add at end |
|||
END { |
|||
if (!inserted) { |
|||
print "" |
|||
print new_entry |
|||
} |
|||
} |
|||
' "$FILE" > "$FILE.new" |
|||
|
|||
mv "$FILE.new" "$FILE" |
|||
fi |
|||
|
|||
echo "VERSION_UPDATED=true" >> $GITHUB_ENV |
|||
|
|||
echo "=== Updated release-notes.md preview ===" |
|||
head -30 "$FILE" |
|||
echo "========================================" |
|||
|
|||
# ------------------------------------------------- |
|||
# Fetch latest stable ABP version (no preview/rc/beta) |
|||
# ------------------------------------------------- |
|||
- name: Fetch latest stable ABP version |
|||
id: abp |
|||
run: | |
|||
# Fetch all releases |
|||
RELEASES=$(curl -fsS \ |
|||
-H "Accept: application/vnd.github+json" \ |
|||
-H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ |
|||
"https://api.github.com/repos/abpframework/abp/releases?per_page=20") |
|||
|
|||
# Filter stable releases (exclude preview, rc, beta, dev) |
|||
ABP_VERSION=$(echo "$RELEASES" | jq -r ' |
|||
[.[] | select( |
|||
(.prerelease == false) and |
|||
(.tag_name | test("preview|rc|beta|dev"; "i") | not) |
|||
)] | first | .tag_name |
|||
') |
|||
|
|||
if [ -z "$ABP_VERSION" ] || [ "$ABP_VERSION" = "null" ]; then |
|||
echo "❌ Could not determine latest stable ABP version" |
|||
exit 1 |
|||
fi |
|||
|
|||
echo "✅ Latest stable ABP version: $ABP_VERSION" |
|||
echo "ABP_VERSION=$ABP_VERSION" >> $GITHUB_ENV |
|||
|
|||
# ------------------------------------------------- |
|||
# Update version-mapping.md (smart range expansion) |
|||
# ------------------------------------------------- |
|||
- name: Update version-mapping.md |
|||
env: |
|||
STUDIO_VERSION: ${{ steps.payload.outputs.version }} |
|||
run: | |
|||
FILE="docs/en/studio/version-mapping.md" |
|||
ABP_VERSION="${{ env.ABP_VERSION }}" |
|||
|
|||
mkdir -p docs/en/studio |
|||
|
|||
# Create file if doesn't exist |
|||
if [ ! -f "$FILE" ]; then |
|||
cat > "$FILE" <<EOF |
|||
# ABP Studio and ABP Startup Template Version Mappings |
|||
|
|||
| **ABP Studio Version** | **ABP Version of Startup Template** | |
|||
|------------------------|-------------------------------------| |
|||
| $STUDIO_VERSION | $ABP_VERSION | |
|||
EOF |
|||
echo "MAPPING_UPDATED=true" >> $GITHUB_ENV |
|||
exit 0 |
|||
fi |
|||
|
|||
# Use Python for smart version range handling |
|||
python3 <<'PYTHON_EOF' |
|||
import os |
|||
import re |
|||
from packaging.version import Version, InvalidVersion |
|||
|
|||
studio_ver = os.environ["STUDIO_VERSION"] |
|||
abp_ver = os.environ["ABP_VERSION"] |
|||
file_path = "docs/en/studio/version-mapping.md" |
|||
|
|||
try: |
|||
studio = Version(studio_ver) |
|||
except InvalidVersion: |
|||
print(f"❌ Invalid Studio version: {studio_ver}") |
|||
exit(1) |
|||
|
|||
with open(file_path, 'r') as f: |
|||
lines = f.readlines() |
|||
|
|||
# Find table start (skip SEO and headers) |
|||
table_start = 0 |
|||
table_end = 0 |
|||
for i, line in enumerate(lines): |
|||
if line.strip().startswith('|') and '**ABP Studio Version**' in line: |
|||
table_start = i |
|||
elif table_start > 0 and line.strip() and not line.strip().startswith('|'): |
|||
table_end = i |
|||
break |
|||
|
|||
if table_start == 0: |
|||
print("❌ Could not find version mapping table") |
|||
exit(1) |
|||
|
|||
# If no end found, table goes to end of file |
|||
if table_end == 0: |
|||
table_end = len(lines) |
|||
|
|||
# Extract sections |
|||
before_table = lines[:table_start] # Everything before table |
|||
table_header = lines[table_start:table_start+2] # Header + separator |
|||
data_rows = [l for l in lines[table_start+2:table_end] if l.strip().startswith('|')] # Data rows |
|||
after_table = lines[table_end:] # Everything after table |
|||
|
|||
new_rows = [] |
|||
handled = False |
|||
|
|||
def parse_version_range(version_str): |
|||
"""Parse '2.1.5 - 2.1.9' or '2.1.5' into (start, end)""" |
|||
version_str = version_str.strip() |
|||
|
|||
if '–' in version_str or '-' in version_str: |
|||
# Handle both em-dash and hyphen |
|||
parts = re.split(r'\s*[–-]\s*', version_str) |
|||
if len(parts) == 2: |
|||
try: |
|||
return Version(parts[0].strip()), Version(parts[1].strip()) |
|||
except InvalidVersion: |
|||
return None, None |
|||
|
|||
try: |
|||
v = Version(version_str) |
|||
return v, v |
|||
except InvalidVersion: |
|||
return None, None |
|||
|
|||
def format_row(studio_range, abp_version): |
|||
"""Format a table row with proper spacing""" |
|||
return f"| {studio_range:<22} | {abp_version:<27} |\n" |
|||
|
|||
# Process existing rows |
|||
for row in data_rows: |
|||
match = re.match(r'\|\s*(.+?)\s*\|\s*(.+?)\s*\|', row) |
|||
if not match: |
|||
continue |
|||
|
|||
existing_studio_range = match.group(1).strip() |
|||
existing_abp = match.group(2).strip() |
|||
|
|||
# Only consider rows with matching ABP version |
|||
if existing_abp != abp_ver: |
|||
new_rows.append(row) |
|||
continue |
|||
|
|||
start_ver, end_ver = parse_version_range(existing_studio_range) |
|||
|
|||
if start_ver is None or end_ver is None: |
|||
new_rows.append(row) |
|||
continue |
|||
|
|||
# Check if current studio version is in this range |
|||
if start_ver <= studio <= end_ver: |
|||
print(f"✅ Studio version {studio_ver} already covered in range {existing_studio_range}") |
|||
handled = True |
|||
new_rows.append(row) |
|||
|
|||
# Check if we should extend the range |
|||
elif end_ver < studio: |
|||
# Calculate if studio is the next logical version |
|||
# For patch versions: 2.1.9 -> 2.1.10 |
|||
# For minor versions: 2.1.9 -> 2.2.0 |
|||
|
|||
# Simple heuristic: if major.minor match and patch increments, extend range |
|||
if (start_ver.major == studio.major and |
|||
start_ver.minor == studio.minor and |
|||
studio.micro <= end_ver.micro + 5): # Allow small gaps |
|||
|
|||
new_range = f"{start_ver} - {studio}" |
|||
new_rows.append(format_row(new_range, abp_ver)) |
|||
print(f"✅ Extended range: {new_range}") |
|||
handled = True |
|||
else: |
|||
new_rows.append(row) |
|||
else: |
|||
new_rows.append(row) |
|||
|
|||
# If not handled, add new row at top of data |
|||
if not handled: |
|||
new_row = format_row(str(studio), abp_ver) |
|||
new_rows.insert(0, new_row) |
|||
print(f"✅ Added new mapping: {studio_ver} -> {abp_ver}") |
|||
|
|||
# Write updated file - preserve ALL content |
|||
with open(file_path, 'w') as f: |
|||
f.writelines(before_table) # SEO, title, intro text |
|||
f.writelines(table_header) # Table header |
|||
f.writelines(new_rows) # Updated data rows |
|||
f.writelines(after_table) # Content after table (preview section, etc.) |
|||
|
|||
print("MAPPING_UPDATED=true") |
|||
PYTHON_EOF |
|||
|
|||
echo "MAPPING_UPDATED=true" >> $GITHUB_ENV |
|||
|
|||
echo "=== Updated version-mapping.md preview ===" |
|||
head -35 "$FILE" |
|||
echo "==========================================" |
|||
|
|||
# ------------------------------------------------- |
|||
# Check for changes |
|||
# ------------------------------------------------- |
|||
- name: Check for changes |
|||
id: changes |
|||
run: | |
|||
git add docs/en/studio/ |
|||
|
|||
if git diff --cached --quiet; then |
|||
echo "has_changes=false" >> $GITHUB_OUTPUT |
|||
echo "⚠️ No changes detected" |
|||
else |
|||
echo "has_changes=true" >> $GITHUB_OUTPUT |
|||
echo "✅ Changes detected:" |
|||
git diff --cached --stat |
|||
fi |
|||
|
|||
# ------------------------------------------------- |
|||
# Commit & push |
|||
# ------------------------------------------------- |
|||
- name: Commit and push |
|||
if: steps.changes.outputs.has_changes == 'true' |
|||
env: |
|||
VERSION: ${{ steps.payload.outputs.version }} |
|||
NAME: ${{ steps.payload.outputs.name }} |
|||
run: | |
|||
git commit -m "docs(studio): update documentation for release $VERSION |
|||
|
|||
- Updated release notes for $VERSION |
|||
- Updated version mapping with ABP ${{ env.ABP_VERSION }} |
|||
|
|||
Release: $NAME" |
|||
|
|||
git push -f origin "$BRANCH" |
|||
|
|||
# ------------------------------------------------- |
|||
# Create or update PR |
|||
# ------------------------------------------------- |
|||
- name: Create or update PR |
|||
if: steps.changes.outputs.has_changes == 'true' |
|||
env: |
|||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} |
|||
VERSION: ${{ steps.payload.outputs.version }} |
|||
NAME: ${{ steps.payload.outputs.name }} |
|||
URL: ${{ steps.payload.outputs.url }} |
|||
TARGET_BRANCH: ${{ steps.payload.outputs.target_branch }} |
|||
run: | |
|||
# Check for existing PR |
|||
EXISTING_PR=$(gh pr list \ |
|||
--head "$BRANCH" \ |
|||
--base "$TARGET_BRANCH" \ |
|||
--json number \ |
|||
--jq '.[0].number' 2>/dev/null || echo "") |
|||
|
|||
PR_BODY="Automated documentation update for ABP Studio release **$VERSION**. |
|||
|
|||
## Release Information |
|||
- **Version**: $VERSION |
|||
- **Name**: $NAME |
|||
- **Release**: [View on GitHub]($URL) |
|||
- **ABP Framework Version**: ${{ env.ABP_VERSION }} |
|||
|
|||
## Changes |
|||
- ✅ Updated [release-notes.md](docs/en/studio/release-notes.md) |
|||
- ✅ Updated [version-mapping.md](docs/en/studio/version-mapping.md) |
|||
|
|||
--- |
|||
|
|||
*This PR was automatically generated by the [update-studio-docs workflow](.github/workflows/update-studio-docs.yml)*" |
|||
|
|||
if [ -n "$EXISTING_PR" ]; then |
|||
echo "🔄 Updating existing PR #$EXISTING_PR" |
|||
|
|||
gh pr edit "$EXISTING_PR" \ |
|||
--title "docs(studio): release $VERSION - $NAME" \ |
|||
--body "$PR_BODY" |
|||
|
|||
echo "PR_NUMBER=$EXISTING_PR" >> $GITHUB_ENV |
|||
else |
|||
echo "📝 Creating new PR" |
|||
|
|||
sleep 2 # Wait for GitHub to sync |
|||
|
|||
PR_URL=$(gh pr create \ |
|||
--title "docs(studio): release $VERSION - $NAME" \ |
|||
--body "$PR_BODY" \ |
|||
--base "$TARGET_BRANCH" \ |
|||
--head "$BRANCH") |
|||
|
|||
PR_NUMBER=$(echo "$PR_URL" | grep -oE '[0-9]+$') |
|||
echo "PR_NUMBER=$PR_NUMBER" >> $GITHUB_ENV |
|||
echo "✅ Created PR #$PR_NUMBER: $PR_URL" |
|||
fi |
|||
|
|||
# ------------------------------------------------- |
|||
# Enable auto-merge (safe with branch protection) |
|||
# ------------------------------------------------- |
|||
- name: Enable auto-merge |
|||
if: steps.changes.outputs.has_changes == 'true' |
|||
env: |
|||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} |
|||
continue-on-error: true |
|||
run: | |
|||
echo "🔄 Attempting to enable auto-merge for PR #$PR_NUMBER" |
|||
|
|||
gh pr merge "$PR_NUMBER" \ |
|||
--auto \ |
|||
--squash \ |
|||
--delete-branch || { |
|||
echo "⚠️ Auto-merge not available (branch protection or permissions)" |
|||
echo " PR #$PR_NUMBER is ready for manual review" |
|||
} |
|||
|
|||
# ------------------------------------------------- |
|||
# Summary |
|||
# ------------------------------------------------- |
|||
- name: Workflow summary |
|||
if: always() |
|||
env: |
|||
VERSION: ${{ steps.payload.outputs.version }} |
|||
run: | |
|||
echo "## 📚 ABP Studio Docs Update Summary" >> $GITHUB_STEP_SUMMARY |
|||
echo "" >> $GITHUB_STEP_SUMMARY |
|||
echo "**Version**: $VERSION" >> $GITHUB_STEP_SUMMARY |
|||
echo "**Release**: ${{ steps.payload.outputs.name }}" >> $GITHUB_STEP_SUMMARY |
|||
echo "**Target Branch**: ${{ steps.payload.outputs.target_branch }}" >> $GITHUB_STEP_SUMMARY |
|||
echo "" >> $GITHUB_STEP_SUMMARY |
|||
|
|||
if [ "${{ steps.changes.outputs.has_changes }}" = "true" ]; then |
|||
echo "### ✅ Changes Applied" >> $GITHUB_STEP_SUMMARY |
|||
echo "- Release notes updated: ${{ env.VERSION_UPDATED }}" >> $GITHUB_STEP_SUMMARY |
|||
echo "- Version mapping updated: ${{ env.MAPPING_UPDATED }}" >> $GITHUB_STEP_SUMMARY |
|||
echo "- ABP Framework version: ${{ env.ABP_VERSION }}" >> $GITHUB_STEP_SUMMARY |
|||
echo "- PR: #${{ env.PR_NUMBER }}" >> $GITHUB_STEP_SUMMARY |
|||
else |
|||
echo "### ⚠️ No Changes" >> $GITHUB_STEP_SUMMARY |
|||
echo "Version $VERSION already exists in documentation." >> $GITHUB_STEP_SUMMARY |
|||
fi |
|||
@ -0,0 +1,377 @@ |
|||
# Building a Multi-Agent AI System with A2A, MCP, and ADK in .NET |
|||
|
|||
> How we combined three open AI protocols — Google's A2A & ADK with Anthropic's MCP — to build a production-ready Multi-Agent Research Assistant using .NET 10. |
|||
|
|||
--- |
|||
|
|||
## Introduction |
|||
|
|||
The AI space is constantly changing and improving. We've moved past single LLM calls and into the era of **Multi-Agent Systems**, in which specialized AI agents work together as a collaborative team. |
|||
|
|||
But here is the problem: **How do you make agents communicate with each other? How do you equip agents with tools? How do you control them?** |
|||
|
|||
Three open protocols have emerged for answering these questions: |
|||
|
|||
- **MCP (Model Context Protocol)** by Anthropic — The "USB-C for AI" |
|||
- **A2A (Agent-to-Agent Protocol)** by Google — The "phone line between agents" |
|||
- **ADK (Agent Development Kit)** by Google — The "organizational chart for agents" |
|||
|
|||
In this article, I will briefly describe each protocol, highlight the benefits of the combination, and walk you through our own project: a **Multi-Agent Research Assistant** developed via ABP Framework. |
|||
|
|||
--- |
|||
|
|||
## The Problem: Why Single-Agent Isn't Enough |
|||
|
|||
Imagine you ask an AI: *"Research the latest AI agent frameworks and give me a comprehensive analysis report."* |
|||
|
|||
A single LLM call would: |
|||
- Hallucinate search results (can't actually browse the web) |
|||
- Produce a shallow analysis (no structured research pipeline) |
|||
- Lose context between steps (no state management) |
|||
- Can't save results anywhere (no tool access) |
|||
|
|||
What you actually need is a **team of specialists**: |
|||
|
|||
1. A **Researcher** who searches the web and gathers raw data |
|||
2. An **Analyst** who processes that data into a structured report |
|||
3. **Tools** that let agents interact with the real world (web, database, filesystem) |
|||
4. An **Orchestrator** that coordinates everything |
|||
|
|||
This is exactly what we built. |
|||
|
|||
 |
|||
--- |
|||
|
|||
## Protocol #1: MCP — Giving Agents Superpowers |
|||
|
|||
### What is MCP? |
|||
|
|||
**MCP (Model Context Protocol)** is Anthropic's standardized protocol for connecting AI models to external tools and data sources. MCP can be thought of as **the USB-C of AI** – one port compatible with everything. |
|||
|
|||
Before MCP, if you wanted your LLM to search the web, query a database, or store files, you had to write custom integration code for each capability. With MCP, you define your tools once, and any MCP-compatible agent can use them. |
|||
|
|||
 |
|||
|
|||
### How MCP Works |
|||
|
|||
MCP follows a simple **Client-Server architecture**: |
|||
|
|||
 |
|||
|
|||
The flow is straightforward: |
|||
|
|||
1. **Discovery**: The agent asks "What tools do you have?" (`tools/list`) |
|||
2. **Invocation**: The agent calls a specific tool (`tools/call`) |
|||
3. **Result**: The tool returns data back to the agent |
|||
|
|||
### MCP in Our Project |
|||
|
|||
We built the following MCP tools: |
|||
|
|||
| MCP Tool | Purpose | Used By | |
|||
|----------|---------|---------| |
|||
| `web_search` | Searches the web via Tavily API | Researcher Agent | |
|||
| `fetch_url_content` | Fetches content from a URL | Researcher Agent | |
|||
| `save_research_to_file` | Saves reports to the filesystem | Analysis Agent | |
|||
| `save_research_to_database` | Persists results in SQL Server | Analysis Agent | |
|||
| `search_past_research` | Queries historical research | Analysis Agent | |
|||
|
|||
The beauty of MCP is that agents do not need to know how these tools are implemented. They simply call them by name, as given in the tool descriptions. |
|||
|
|||
--- |
|||
|
|||
## Protocol #2: A2A — Making Agents Talk to Each Other |
|||
|
|||
### What is A2A? |
|||
|
|||
**A2A (Agent to Agent)**, originally proposed by Google and now hosted under the Linux Foundation, is a protocol that allows **one AI agent to discover another and exchange tasks**. Where MCP helps agents acquire tools, A2A gives them the ability to talk to each other. |
|||
|
|||
Think of it this way: |
|||
- **MCP** = "What can this agent *do*?" (capabilities) |
|||
- **A2A** = "How do agents *talk*?" (communication) |
|||
|
|||
### The Agent Card: Your Agent's Business Card |
|||
|
|||
Every A2A-compatible agent publishes an **Agent Card** — a JSON document that describes who it is and what it can do. It's like a business card for AI agents: |
|||
|
|||
```json |
|||
{ |
|||
"name": "Researcher Agent", |
|||
"description": "Searches the web to collect comprehensive research data", |
|||
"url": "https://localhost:44331/a2a/researcher", |
|||
"version": "1.0.0", |
|||
"capabilities": { |
|||
"streaming": false, |
|||
"pushNotifications": false |
|||
}, |
|||
"skills": [ |
|||
{ |
|||
"id": "web-research", |
|||
"name": "Web Research", |
|||
"description": "Searches the web on a given topic and collects raw data", |
|||
"tags": ["research", "web-search", "data-collection"] |
|||
} |
|||
] |
|||
} |
|||
``` |
|||
|
|||
Other agents can discover this card at `/.well-known/agent.json` and immediately know: |
|||
- What this agent does |
|||
- Where to reach it |
|||
- What skills it has |
|||
|
|||
 |
|||
|
|||
### How A2A Task Exchange Works |
|||
|
|||
Once an agent discovers another agent, it can send tasks: |
|||
|
|||
 |
|||
|
|||
The key concepts: |
|||
|
|||
- **Task**: A unit of work sent between agents (like an email with instructions) |
|||
- **Artifact**: The output produced by an agent (like an attachment in the reply) |
|||
- **Task State**: `Submitted → Working → Completed/Failed` |
|||
|
|||
### A2A in Our Project |
|||
|
|||
Agent communication in our system uses A2A: |
|||
|
|||
- The **Orchestrator** finds all agents through the Agent Cards |
|||
- It sends a research task to the **Researcher Agent** |
|||
- The Researcher’s output (artifacts) is used as input by the **Analysis Agent**, which creates the final structured report |
|||
|
|||
--- |
|||
|
|||
## Protocol #3: ADK — Organizing Your Agent Team |
|||
|
|||
### What is ADK? |
|||
|
|||
**ADK (Agent Development Kit)**, created by Google, provides patterns for **organizing and orchestrating multiple agents**. It answers the question: "How do you build a team of agents that work together efficiently?" |
|||
|
|||
ADK gives you: |
|||
- **BaseAgent**: A foundation every agent inherits from |
|||
- **SequentialAgent**: Runs agents one after another (pipeline) |
|||
- **ParallelAgent**: Runs agents simultaneously |
|||
- **AgentContext**: Shared state that flows through the pipeline |
|||
- **AgentEvent**: Control flow signals (escalate, transfer, state updates) |
|||
|
|||
> **Note**: ADK's official SDK is Python-only. We ported the core patterns to .NET for our project. |
|||
|
|||
### The Pipeline Pattern |
|||
|
|||
The most powerful ADK pattern is the **Sequential Pipeline**. Think of it as an assembly line in a factory: |
|||
|
|||
 |
|||
|
|||
Each agent: |
|||
1. Receives the shared **AgentContext** (with state from previous agents) |
|||
2. Does its work |
|||
3. Updates the state |
|||
4. Passes it to the next agent |
|||
|
|||
### AgentContext: The Shared Memory |
|||
|
|||
`AgentContext` is like a shared whiteboard that all agents can read from and write to: |
|||
|
|||
 |
|||
|
|||
This pattern eliminates the need for complex inter-agent messaging — agents simply read and write to a shared context. |
|||
|
|||
### ADK Orchestration Patterns |
|||
|
|||
ADK supports multiple orchestration patterns: |
|||
|
|||
| Pattern | Description | Use Case | |
|||
|---------|-------------|----------| |
|||
| **Sequential** | A → B → C | Research → Analysis pipeline | |
|||
| **Parallel** | A, B, C simultaneously | Multiple searches at once | |
|||
| **Fan-Out/Fan-In** | Split → Process → Merge | Distributed research | |
|||
| **Conditional Routing** | If/else agent selection | Route by query type | |
|||
|
|||
--- |
|||
|
|||
## How the Three Protocols Work Together |
|||
|
|||
Here's the key insight: **MCP, A2A, and ADK are not competitors — they're complementary layers of a complete agent system.** |
|||
|
|||
 |
|||
|
|||
Each protocol handles a different concern: |
|||
|
|||
| Layer | Protocol | Question It Answers | |
|||
|-------|----------|-------------------| |
|||
| **Top** | ADK | "How are agents organized?" | |
|||
| **Middle** | A2A | "How do agents communicate?" | |
|||
| **Bottom** | MCP | "What tools can agents use?" | |
|||
|
|||
--- |
|||
|
|||
## Our Project: Multi-Agent Research Assistant |
|||
|
|||
### Built With |
|||
|
|||
- **.NET 10.0** — Latest runtime |
|||
- **ABP Framework 10.0.2** — Enterprise .NET application framework |
|||
- **Semantic Kernel 1.70.0** — Microsoft's AI orchestration SDK |
|||
- **Azure OpenAI (GPT)** — LLM backbone |
|||
- **Tavily Search API** — Real-time web search |
|||
- **SQL Server** — Research persistence |
|||
- **MCP SDK** (`ModelContextProtocol` 0.8.0-preview.1) |
|||
- **A2A SDK** (`A2A` 0.3.3-preview) |
|||
|
|||
|
|||
### How It Works (Step by Step) |
|||
|
|||
**Step 1: User Submits a Query** |
|||
|
|||
For example, the user enters a research topic in the dashboard — *“Compare the latest AI agent frameworks: LangChain, Semantic Kernel, and AutoGen”* — and then selects the execution mode: ADK-Sequential or A2A. |
|||
|
|||
**Step 2: Orchestrator Activates** |
|||
|
|||
The `ResearchOrchestrator` receives the query and constructs the `AgentContext`. In ADK mode, it constructs a `SequentialAgent` with two sub-agents; in A2A mode, it uses the `A2AServer` to send the tasks. |
|||
|
|||
**Step 3: Researcher Agent Goes to Work** |
|||
|
|||
The Researcher Agent: |
|||
- Receives the query from the context |
|||
- Uses GPT to formulate optimal search queries |
|||
- Calls the `web_search` MCP tool (powered by Tavily API) |
|||
- Collects and synthesizes raw research data |
|||
- Stores results in the shared `AgentContext` |
|||
|
|||
**Step 4: Analysis Agent Takes Over** |
|||
|
|||
The Analysis Agent: |
|||
- Reads the Researcher's raw data from `AgentContext` |
|||
- Uses GPT to perform deep analysis |
|||
- Generates a structured Markdown report with sections: |
|||
- Executive Summary |
|||
- Key Findings |
|||
- Detailed Analysis |
|||
- Comparative Assessment |
|||
- Conclusion and Recommendations |
|||
- Calls MCP tools to save the report to both filesystem and database |
|||
|
|||
**Step 5: Results Returned** |
|||
|
|||
The orchestrator collects all results and returns them to the user via the REST API. The dashboard displays the research report, analysis report, agent event timeline, and raw data. |
|||
|
|||
|
|||
### Two Execution Modes |
|||
|
|||
Our system supports two execution modes, demonstrating both ADK and A2A approaches: |
|||
|
|||
#### Mode 1: ADK Sequential Pipeline |
|||
|
|||
Agents are organized as a `SequentialAgent`. State flows automatically through the pipeline via `AgentContext`. This is an in-process approach — fast and simple. |
|||
|
|||
 |
|||
|
|||
#### Mode 2: A2A Protocol-Based |
|||
|
|||
Agents communicate via the A2A protocol. The Orchestrator sends `AgentTask` objects to each agent through the `A2AServer`. Each agent has its own `AgentCard` for discovery. |
|||
|
|||
 |
|||
|
|||
### The Dashboard |
|||
|
|||
The UI provides a complete research experience: |
|||
|
|||
- **Hero Section** with system description and protocol badges |
|||
- **Architecture Cards** showing all four components (Researcher, Analyst, MCP Tools, Orchestrator) |
|||
- **Research Form** with query input and mode selection |
|||
- **Live Pipeline Status** tracking each stage of execution |
|||
- **Tabbed Results** view: Research Report, Analysis Report, Raw Data, Agent Events |
|||
- **Research History** table with past queries and their results |
|||
|
|||
|
|||
 |
|||
|
|||
 |
|||
|
|||
--- |
|||
|
|||
## Why ABP Framework? |
|||
|
|||
We chose ABP Framework as our .NET application foundation. Here's why it was a natural fit: |
|||
|
|||
| ABP Feature | How We Used It | |
|||
|-------------|---------------| |
|||
| **Auto API Controllers** | `ResearchAppService` automatically becomes REST API endpoints | |
|||
| **Dependency Injection** | Clean registration of agents, tools, orchestrator, Semantic Kernel | |
|||
| **Repository Pattern** | `IRepository<ResearchRecord>` for database operations in MCP tools | |
|||
| **Module System** | All agent ecosystem config encapsulated in `AgentEcosystemModule` | |
|||
| **Entity Framework Core** | Research record persistence with code-first migrations | |
|||
| **Built-in Auth** | OpenIddict integration for securing agent endpoints | |
|||
| **Health Checks** | Monitoring agent ecosystem health | |
|||
|
|||
ABP's single-layer template gave us a solid .NET foundation, providing the enterprise features we needed without unnecessary complexity for a focused AI project. That said, the agent architecture (MCP, A2A, ADK) is framework-agnostic and can be implemented in any .NET application. |
|||
|
|||
--- |
|||
|
|||
## Key Takeaways |
|||
|
|||
### 1. Protocols Are Complementary, Not Competing |
|||
|
|||
MCP, A2A, and ADK solve different problems. Using them together creates a complete agent system: |
|||
- **MCP**: Standardize tool access |
|||
- **A2A**: Standardize inter-agent communication |
|||
- **ADK**: Standardize agent orchestration |
|||
|
|||
### 2. Start Simple, Scale Later |
|||
|
|||
Our system runs everything in a single process, using in-process A2A. Because the agents already communicate through A2A, each one can later be extracted into its own microservice without changing the application logic. |
|||
|
|||
### 3. Shared State > Message Passing (For Simple Cases) |
|||
|
|||
ADK's `AgentContext` with shared state is simpler and faster than A2A message passing for in-process scenarios. Use A2A when agents need to run as separate services. |
|||
|
|||
### 4. MCP is the Real Game-Changer |
|||
|
|||
The ability to define tools once and have any agent use them — with automatic discovery and structured invocations — eliminates enormous amounts of boilerplate code. |
|||
|
|||
### 5. LLM Abstraction is Critical |
|||
|
|||
Using Semantic Kernel's `IChatCompletionService` lets you swap between Azure OpenAI, OpenAI, Ollama, or any provider without touching agent code. |
|||
|
|||
--- |
|||
|
|||
## What's Next? |
|||
|
|||
This project demonstrates the foundation of a multi-agent system. Future enhancements could include: |
|||
|
|||
- **Streaming responses** — Real-time updates as agents work (A2A supports this) |
|||
- **More specialized agents** — Code analysis, translation, fact-checking agents |
|||
- **Distributed deployment** — Each agent as a separate microservice with HTTP-based A2A |
|||
- **Agent marketplace** — Discover and integrate third-party agents via A2A Agent Cards |
|||
- **Human-in-the-loop** — Using A2A's `InputRequired` state for human approval steps |
|||
- **RAG integration** — MCP tools for vector database search |
|||
|
|||
--- |
|||
|
|||
## Resources |
|||
|
|||
| Resource | Link | |
|||
|----------|------| |
|||
| **MCP Specification** | [modelcontextprotocol.io](https://modelcontextprotocol.io) | |
|||
| **A2A Specification** | [google.github.io/A2A](https://google.github.io/A2A) | |
|||
| **ADK Documentation** | [google.github.io/adk-docs](https://google.github.io/adk-docs) | |
|||
| **ABP Framework** | [abp.io](https://abp.io) | |
|||
| **Semantic Kernel** | [github.com/microsoft/semantic-kernel](https://github.com/microsoft/semantic-kernel) | |
|||
| **MCP .NET SDK** | [NuGet: ModelContextProtocol](https://www.nuget.org/packages/ModelContextProtocol) | |
|||
| **A2A .NET SDK** | [NuGet: A2A](https://www.nuget.org/packages/A2A) | |
|||
| **Our Source Code** | [GitHub Repository](https://github.com/fahrigedik/agent-ecosystem-in-abp) | |
|||
|
|||
--- |
|||
|
|||
## Conclusion |
|||
|
|||
Building a multi-agent AI system is no longer a futuristic dream; it can be achieved today with open protocols and readily available frameworks. Using **MCP** for tool access, **A2A** for inter-agent communication, and **ADK** for orchestration, we built a working Research Assistant. |
|||
|
|||
ABP Framework and .NET proved to be an excellent choice, providing the infrastructure we needed — DI, repositories, auto APIs, and modules — so we could focus entirely on the AI agent architecture. |
|||
|
|||
The era of single LLM calls is ending, and the era of agent ecosystems begins now. |
|||
|
|||
--- |
|||
|
After Width: | Height: | Size: 48 KiB |
|
After Width: | Height: | Size: 52 KiB |
|
After Width: | Height: | Size: 22 KiB |
|
After Width: | Height: | Size: 86 KiB |
|
After Width: | Height: | Size: 126 KiB |
|
After Width: | Height: | Size: 54 KiB |
|
After Width: | Height: | Size: 61 KiB |
|
After Width: | Height: | Size: 169 KiB |
|
After Width: | Height: | Size: 16 KiB |
|
After Width: | Height: | Size: 17 KiB |
|
After Width: | Height: | Size: 14 KiB |
|
After Width: | Height: | Size: 17 KiB |
|
After Width: | Height: | Size: 358 KiB |
@ -0,0 +1,728 @@ |
|||
# Implementing Multiple Global Query Filters with Entity Framework Core |
|||
|
|||
Global query filters are one of Entity Framework Core's most powerful features for automatically filtering data based on certain conditions. They allow you to define filter criteria at the entity level that are automatically applied to all LINQ queries, making it impossible for developers to accidentally forget to include important filtering logic. In this article, we'll explore how to implement multiple global query filters in ABP Framework, covering built-in filters, custom filters, and performance optimization techniques. |
|||
|
|||
By the end of this guide, you'll understand how ABP Framework's data filtering system works, how to create custom global query filters for your specific business requirements, how to combine multiple filters effectively, and how to optimize filter performance using user-defined functions. |
|||
|
|||
## Understanding Global Query Filters in EF Core |
|||
|
|||
Global query filters were introduced in EF Core 2.0 and allow you to automatically append LINQ predicates to queries generated for an entity type. This is particularly useful for scenarios like multi-tenancy, soft delete, data isolation, and row-level security. |
|||
|
|||
In traditional applications, developers must remember to add filter conditions manually to every query: |
|||
|
|||
```csharp |
|||
// Manual filtering - error-prone and tedious |
|||
var activeBooks = await _bookRepository |
|||
.GetListAsync(b => b.IsDeleted == false && b.TenantId == currentTenantId); |
|||
``` |
|||
|
|||
With global query filters, this logic is applied automatically: |
|||
|
|||
```csharp |
|||
// Filter is applied automatically - no manual filtering needed |
|||
var activeBooks = await _bookRepository.GetListAsync(); |
|||
``` |
|||
|
|||
ABP Framework provides a sophisticated data filtering system built on top of EF Core's global query filters, with built-in support for soft delete, multi-tenancy, and the ability to easily create custom filters. |
|||
|
|||
### Important: Plain EF Core vs ABP Composition |
|||
|
|||
In plain EF Core, calling `HasQueryFilter` multiple times for the same entity does **not** create multiple active filters. The last call replaces the previous one (unless you use newer named-filter APIs in recent EF Core versions). |
|||
|
|||
ABP provides `HasAbpQueryFilter` to compose query filters safely. This method combines your custom filter with ABP's built-in filters (such as `ISoftDelete` and `IMultiTenant`) and with other `HasAbpQueryFilter` calls. |
|||
|
|||
## ABP Framework's Data Filtering System |
|||
|
|||
ABP's data filtering system is defined in the `Volo.Abp.Data` namespace and provides a consistent way to manage filters across your application. The core interface is `IDataFilter<TFilter>`, which allows you to enable or disable filters programmatically. |
|||
|
|||
### Built-in Filters |
|||
|
|||
ABP Framework comes with several built-in filters: |
|||
|
|||
1. **ISoftDelete**: Automatically filters out soft-deleted entities |
|||
2. **IMultiTenant**: Automatically filters entities by current tenant (for SaaS applications) |
|||
3. **IIsActive**: A commonly used custom-filter example (shown in the ABP documentation) that filters entities based on active status |
|||
|
|||
Let's look at how these are implemented in the ABP framework: |
|||
|
|||
The `ISoftDelete` interface is straightforward: |
|||
|
|||
```csharp |
|||
namespace Volo.Abp; |
|||
|
|||
public interface ISoftDelete |
|||
{ |
|||
bool IsDeleted { get; } |
|||
} |
|||
``` |
|||
|
|||
Any entity implementing this interface will automatically have deleted records filtered out of queries. |
|||
|
|||
### Enabling and Disabling Filters |
|||
|
|||
ABP provides the `IDataFilter<TFilter>` service to control filter behavior at runtime: |
|||
|
|||
```csharp |
|||
public class BookAppService : ApplicationService |
|||
{ |
|||
private readonly IDataFilter<ISoftDelete> _softDeleteFilter; |
|||
private readonly IRepository<Book, Guid> _bookRepository; |
|||
|
|||
public BookAppService( |
|||
IDataFilter<ISoftDelete> softDeleteFilter, |
|||
IRepository<Book, Guid> bookRepository) |
|||
{ |
|||
_softDeleteFilter = softDeleteFilter; |
|||
_bookRepository = bookRepository; |
|||
} |
|||
|
|||
public async Task<List<Book>> GetAllBooksIncludingDeletedAsync() |
|||
{ |
|||
// Temporarily disable the soft delete filter |
|||
using (_softDeleteFilter.Disable()) |
|||
{ |
|||
return await _bookRepository.GetListAsync(); |
|||
} |
|||
} |
|||
|
|||
public async Task<List<Book>> GetActiveBooksAsync() |
|||
{ |
|||
// Filter is enabled by default - soft-deleted items are excluded |
|||
return await _bookRepository.GetListAsync(); |
|||
} |
|||
} |
|||
``` |
|||
|
|||
You can also check if a filter is enabled and enable/disable it programmatically: |
|||
|
|||
```csharp |
|||
public async Task ProcessBooksAsync() |
|||
{ |
|||
// Check if filter is enabled |
|||
if (_softDeleteFilter.IsEnabled) |
|||
{ |
|||
// Enable or disable explicitly |
|||
_softDeleteFilter.Enable(); |
|||
// or |
|||
_softDeleteFilter.Disable(); |
|||
} |
|||
} |
|||
``` |
|||
|
|||
## Creating Custom Global Query Filters |
|||
|
|||
Now let's create custom global query filters for a real-world scenario. Imagine we have a library management system where we need to filter books based on: |
|||
|
|||
1. **Publication Status**: Only show published books in public areas |
|||
2. **User's Department**: Users can only see books from their department |
|||
3. **Approval Status**: Only show approved content |
|||
|
|||
### Step 1: Define Filter Interfaces |
|||
|
|||
First, create the filter interfaces. You can define them in the same file as your entity or in separate files: |
|||
|
|||
```csharp |
|||
// Can be placed in the same file as Book entity or in separate files |
|||
namespace Library; |
|||
|
|||
public interface IPublishable |
|||
{ |
|||
bool IsPublished { get; } |
|||
DateTime PublishDate { get; set; } |
|||
} |
|||
|
|||
public interface IDepartmentRestricted |
|||
{ |
|||
Guid DepartmentId { get; } |
|||
} |
|||
|
|||
public interface IApproveable |
|||
{ |
|||
bool IsApproved { get; } |
|||
} |
|||
|
|||
public interface IPublishedFilter |
|||
{ |
|||
} |
|||
|
|||
public interface IApprovedFilter |
|||
{ |
|||
} |
|||
``` |
|||
|
|||
`IPublishable` / `IApproveable` are implemented by entities and define entity properties. |
|||
`IPublishedFilter` / `IApprovedFilter` are filter-state interfaces used with `IDataFilter` so you can enable/disable those filters at runtime. |
|||
|
|||
### Step 2: Add Filter Expressions to DbContext |
|||
|
|||
Now let's add the filter expressions to your existing DbContext. First, here's how to use `HasAbpQueryFilter` to create **always-on** filters (they cannot be toggled at runtime): |
|||
|
|||
```csharp |
|||
// MyProjectDbContext.cs |
|||
using Microsoft.EntityFrameworkCore; |
|||
using Volo.Abp.EntityFrameworkCore; |
|||
using Volo.Abp.GlobalFeatures; |
|||
using Volo.Abp.MultiTenancy; |
|||
using Volo.Abp.Authorization; |
|||
using Volo.Abp.Data; |
|||
using Volo.Abp.EntityFrameworkCore.Modeling; |
|||
|
|||
namespace Library; |
|||
|
|||
public class LibraryDbContext : AbpDbContext<LibraryDbContext> |
|||
{ |
|||
public DbSet<Book> Books { get; set; } |
|||
public DbSet<Department> Departments { get; set; } |
|||
public DbSet<Author> Authors { get; set; } |
|||
|
|||
public LibraryDbContext(DbContextOptions<LibraryDbContext> options) |
|||
: base(options) |
|||
{ |
|||
} |
|||
|
|||
protected override void OnModelCreating(ModelBuilder builder) |
|||
{ |
|||
base.OnModelCreating(builder); |
|||
|
|||
builder.Entity<Book>(b => |
|||
{ |
|||
b.ToTable("Books"); |
|||
b.ConfigureByConvention(); |
|||
|
|||
// HasAbpQueryFilter creates ALWAYS-ACTIVE filters |
|||
// These cannot be toggled at runtime via IDataFilter |
|||
b.HasAbpQueryFilter(book => |
|||
book.IsPublished && |
|||
book.PublishDate <= DateTime.UtcNow); |
|||
|
|||
b.HasAbpQueryFilter(book => book.IsApproved); |
|||
}); |
|||
|
|||
builder.Entity<Department>(b => |
|||
{ |
|||
b.ToTable("Departments"); |
|||
b.ConfigureByConvention(); |
|||
}); |
|||
} |
|||
} |
|||
``` |
|||
|
|||
> **Note:** Using `HasAbpQueryFilter` alone creates filters that are always active and cannot be toggled at runtime. This approach is simpler but less flexible. For toggleable filters, see Step 3 below. |
|||
|
|||
### Step 3: Make Filters Toggleable (Optional) |
|||
|
|||
If you need filters that can be enabled/disabled at runtime via `IDataFilter<T>`, override `ShouldFilterEntity` and `CreateFilterExpression` instead of (or in addition to) `HasAbpQueryFilter`: |
|||
|
|||
```csharp |
|||
// MyProjectDbContext.cs |
|||
using System; |
|||
using System.Linq.Expressions; |
|||
using Microsoft.EntityFrameworkCore; |
|||
using Microsoft.EntityFrameworkCore.Metadata; |
|||
using Microsoft.EntityFrameworkCore.Metadata.Builders; |
|||
using Volo.Abp.EntityFrameworkCore; |
|||
|
|||
namespace Library; |
|||
|
|||
public class LibraryDbContext : AbpDbContext<LibraryDbContext> |
|||
{ |
|||
protected bool IsPublishedFilterEnabled => DataFilter?.IsEnabled<IPublishedFilter>() ?? false; |
|||
protected bool IsApprovedFilterEnabled => DataFilter?.IsEnabled<IApprovedFilter>() ?? false; |
|||
|
|||
protected override bool ShouldFilterEntity<TEntity>(IMutableEntityType entityType) |
|||
{ |
|||
if (typeof(IPublishable).IsAssignableFrom(typeof(TEntity))) |
|||
{ |
|||
return true; |
|||
} |
|||
|
|||
if (typeof(IApproveable).IsAssignableFrom(typeof(TEntity))) |
|||
{ |
|||
return true; |
|||
} |
|||
|
|||
return base.ShouldFilterEntity<TEntity>(entityType); |
|||
} |
|||
|
|||
protected override Expression<Func<TEntity, bool>>? CreateFilterExpression<TEntity>( |
|||
ModelBuilder modelBuilder, |
|||
EntityTypeBuilder<TEntity> entityTypeBuilder) |
|||
where TEntity : class |
|||
{ |
|||
var expression = base.CreateFilterExpression<TEntity>(modelBuilder, entityTypeBuilder); |
|||
|
|||
if (typeof(IPublishable).IsAssignableFrom(typeof(TEntity))) |
|||
{ |
|||
Expression<Func<TEntity, bool>> publishFilter = e => |
|||
!IsPublishedFilterEnabled || |
|||
( |
|||
EF.Property<bool>(e, nameof(IPublishable.IsPublished)) && |
|||
EF.Property<DateTime>(e, nameof(IPublishable.PublishDate)) <= DateTime.UtcNow |
|||
); |
|||
|
|||
expression = expression == null |
|||
? publishFilter |
|||
: QueryFilterExpressionHelper.CombineExpressions(expression, publishFilter); |
|||
} |
|||
|
|||
if (typeof(IApproveable).IsAssignableFrom(typeof(TEntity))) |
|||
{ |
|||
Expression<Func<TEntity, bool>> approvalFilter = e => |
|||
!IsApprovedFilterEnabled || EF.Property<bool>(e, nameof(IApproveable.IsApproved)); |
|||
|
|||
expression = expression == null |
|||
? approvalFilter |
|||
: QueryFilterExpressionHelper.CombineExpressions(expression, approvalFilter); |
|||
} |
|||
|
|||
return expression; |
|||
} |
|||
} |
|||
``` |
|||
|
|||
This mapping step is what connects `IDataFilter<IPublishedFilter>` and `IDataFilter<IApprovedFilter>` to entity-level predicates. Without this step, `HasAbpQueryFilter` expressions remain always active. |
|||
|
|||
> **Important:** Note that we use `DateTime` (not `DateTime?`) in the filter expression to match the entity property type. Adjust accordingly if your entity uses nullable `DateTime?`. |
|||
|
|||
### Step 4: Disable Custom Filters with IDataFilter |
|||
|
|||
Once custom filters are mapped to the ABP data-filter pipeline, you can disable them just like built-in filters: |
|||
|
|||
```csharp |
|||
public class BookAppService : ApplicationService |
|||
{ |
|||
private readonly IRepository<Book, Guid> _bookRepository; |
|||
private readonly IDataFilter<IPublishedFilter> _publishedFilter; |
|||
private readonly IDataFilter<IApprovedFilter> _approvedFilter; |
|||
|
|||
public BookAppService( |
|||
IRepository<Book, Guid> bookRepository, |
|||
IDataFilter<IPublishedFilter> publishedFilter, |
|||
IDataFilter<IApprovedFilter> approvedFilter) |
|||
{ |
|||
_bookRepository = bookRepository; |
|||
_publishedFilter = publishedFilter; |
|||
_approvedFilter = approvedFilter; |
|||
} |
|||
|
|||
public async Task<List<Book>> GetIncludingUnpublishedAndUnapprovedAsync() |
|||
{ |
|||
using (_publishedFilter.Disable()) |
|||
using (_approvedFilter.Disable()) |
|||
{ |
|||
return await _bookRepository.GetListAsync(); |
|||
} |
|||
} |
|||
} |
|||
``` |
|||
|
|||
## Advanced: Multiple Filters with User-Defined Functions |
|||
|
|||
Starting from ABP v8.3, you can use user-defined function (UDF) mapping for better performance. This approach generates more efficient SQL and allows EF Core to create better execution plans. |
|||
|
|||
### Step 1: Enable UDF Mapping |
|||
|
|||
First, configure your module to use UDF mapping: |
|||
|
|||
```csharp |
|||
// MyProjectModule.cs |
|||
using Volo.Abp.EntityFrameworkCore; |
|||
using Volo.Abp.EntityFrameworkCore.GlobalFilters; |
|||
using Microsoft.Extensions.DependencyInjection; |
|||
|
|||
namespace Library; |
|||
|
|||
[DependsOn( |
|||
typeof(AbpEntityFrameworkCoreModule), |
|||
typeof(AbpDddDomainModule) |
|||
)] |
|||
public class LibraryModule : AbpModule |
|||
{ |
|||
public override void ConfigureServices(ServiceConfigurationContext context) |
|||
{ |
|||
Configure<AbpEfCoreGlobalFilterOptions>(options => |
|||
{ |
|||
options.UseDbFunction = true; // Enable UDF mapping |
|||
}); |
|||
} |
|||
} |
|||
``` |
|||
|
|||
### Step 2: Define DbFunctions |
|||
|
|||
Create static methods that EF Core will map to database functions: |
|||
|
|||
```csharp |
|||
// LibraryDbFunctions.cs |
|||
using Microsoft.EntityFrameworkCore; |
|||
|
|||
namespace Library; |
|||
|
|||
public static class LibraryDbFunctions |
|||
{ |
|||
public static bool IsPublishedFilter(bool isPublished, DateTime? publishDate) |
|||
{ |
|||
return isPublished && (publishDate == null || publishDate <= DateTime.UtcNow); |
|||
} |
|||
|
|||
public static bool IsApprovedFilter(bool isApproved) |
|||
{ |
|||
return isApproved; |
|||
} |
|||
|
|||
public static bool DepartmentFilter(Guid entityDepartmentId, Guid userDepartmentId) |
|||
{ |
|||
return entityDepartmentId == userDepartmentId; |
|||
} |
|||
} |
|||
``` |
|||
|
|||
### Step 3: Apply UDF Filters |
|||
|
|||
Update your DbContext to use the UDF-based filters: |
|||
|
|||
```csharp |
|||
// MyProjectDbContext.cs |
|||
protected override void OnModelCreating(ModelBuilder builder) |
|||
{ |
|||
base.OnModelCreating(builder); |
|||
|
|||
// Map CLR methods to SQL scalar functions. |
|||
// Create matching SQL functions in a migration. |
|||
var isPublishedMethod = typeof(LibraryDbFunctions).GetMethod( |
|||
nameof(LibraryDbFunctions.IsPublishedFilter), |
|||
new[] { typeof(bool), typeof(DateTime?) })!; |
|||
builder.HasDbFunction(isPublishedMethod); |
|||
|
|||
var isApprovedMethod = typeof(LibraryDbFunctions).GetMethod( |
|||
nameof(LibraryDbFunctions.IsApprovedFilter), |
|||
new[] { typeof(bool) })!; |
|||
builder.HasDbFunction(isApprovedMethod); |
|||
|
|||
builder.Entity<Book>(b => |
|||
{ |
|||
b.ToTable("Books"); |
|||
b.ConfigureByConvention(); |
|||
|
|||
// ABP way: define separate filters. HasAbpQueryFilter composes them. |
|||
b.HasAbpQueryFilter(book => |
|||
LibraryDbFunctions.IsPublishedFilter(book.IsPublished, book.PublishDate)); |
|||
|
|||
b.HasAbpQueryFilter(book => |
|||
LibraryDbFunctions.IsApprovedFilter(book.IsApproved)); |
|||
}); |
|||
} |
|||
``` |
|||
|
|||
This approach generates cleaner SQL and improves query performance, especially in complex scenarios with multiple filters. |
|||
|
|||
## Working with Complex Filter Combinations |
|||
|
|||
When combining multiple filters, it's important to understand how they interact. Let's explore some common scenarios. |
|||
|
|||
### Combining Tenant and Department Filters |
|||
|
|||
In a multi-tenant application, you might need to combine tenant isolation with department-level access control: |
|||
|
|||
```csharp |
|||
public class BookAppService : ApplicationService |
|||
{ |
|||
private readonly IRepository<Book, Guid> _bookRepository; |
|||
private readonly IDataFilter<IMultiTenant> _tenantFilter; |
|||
private readonly ICurrentUser _currentUser; |
|||
|
|||
public BookAppService( |
|||
IRepository<Book, Guid> bookRepository, |
|||
IDataFilter<IMultiTenant> tenantFilter, |
|||
ICurrentUser currentUser) |
|||
{ |
|||
_bookRepository = bookRepository; |
|||
_tenantFilter = tenantFilter; |
|||
_currentUser = currentUser; |
|||
} |
|||
|
|||
public async Task<List<BookDto>> GetMyDepartmentBooksAsync() |
|||
{ |
|||
var currentUser = _currentUser; |
|||
var userDepartmentId = GetUserDepartmentId(currentUser); |
|||
|
|||
// Get all books without department filter, then filter in memory |
|||
// (for scenarios where you need custom filter logic) |
|||
using (_tenantFilter.Disable()) // Optional: disable tenant filter if needed |
|||
{ |
|||
var allBooks = await _bookRepository.GetListAsync(); |
|||
|
|||
// Apply department filter in memory (custom logic) |
|||
var departmentBooks = allBooks |
|||
.Where(b => b.DepartmentId == userDepartmentId) |
|||
.ToList(); |
|||
|
|||
return ObjectMapper.Map<List<Book>, List<BookDto>>(departmentBooks); |
|||
} |
|||
} |
|||
|
|||
private Guid GetUserDepartmentId(ICurrentUser currentUser) |
|||
{ |
|||
// Get user's department from claims or database |
|||
var departmentClaim = currentUser.FindClaim("DepartmentId"); |
|||
return Guid.Parse(departmentClaim.Value); |
|||
} |
|||
} |
|||
``` |
|||
|
|||
### Filter Priority and Override |
|||
|
|||
Sometimes you need to override filters in specific scenarios. ABP provides a flexible way to handle this: |
|||
|
|||
```csharp |
|||
public async Task<Book> GetBookForEditingAsync(Guid id) |
|||
{ |
|||
// Disable soft delete filter to get deleted records for restoration |
|||
using (DataFilter.Disable<ISoftDelete>()) |
|||
{ |
|||
return await _bookRepository.GetAsync(id); |
|||
} |
|||
} |
|||
|
|||
public async Task<Book> GetBookIncludingUnpublishedAsync(Guid id) |
|||
{ |
|||
// Use GetQueryableAsync to customize the query |
|||
var query = await _bookRepository.GetQueryableAsync(); |
|||
|
|||
// Manually apply or bypass filters |
|||
var book = await query |
|||
.FirstOrDefaultAsync(b => b.Id == id); |
|||
|
|||
return book; |
|||
} |
|||
``` |
|||
|
|||
## Best Practices for Multiple Global Query Filters |
|||
|
|||
When implementing multiple global query filters, consider these best practices: |
|||
|
|||
### 1. Keep Filters Simple |
|||
|
|||
Complex filter expressions can significantly impact query performance. Keep each condition focused on a single concern. In ABP, you can define them separately with `HasAbpQueryFilter`, which composes with ABP's built-in filters: |
|||
|
|||
```csharp |
|||
// Good (ABP): separate, focused filters composed by HasAbpQueryFilter |
|||
b.HasAbpQueryFilter(book => book.IsPublished); |
|||
b.HasAbpQueryFilter(book => book.IsApproved); |
|||
b.HasAbpQueryFilter(book => book.DepartmentId == userDeptId); |
|||
|
|||
// Avoid: calling HasQueryFilter multiple times for the same entity |
|||
// in plain EF Core (the last call replaces the previous one) |
|||
b.HasQueryFilter(book => book.IsPublished); |
|||
b.HasQueryFilter(book => book.IsApproved); |
|||
``` |
|||
|
|||
### 2. Use Indexing |
|||
|
|||
Ensure your database has appropriate indexes for filtered columns: |
|||
|
|||
```csharp |
|||
builder.Entity<Book>(b => |
|||
{ |
|||
    b.HasIndex(book => book.IsPublished); |
|||
    b.HasIndex(book => book.IsApproved); |
|||
    b.HasIndex(book => book.DepartmentId); |
|||
    b.HasIndex(book => new { book.IsPublished, book.PublishDate }); |
|||
}); |
|||
``` |
|||
|
|||
### 3. Consider Performance Impact |
|||
|
|||
Use UDF mapping for better performance with complex filters. Profile your queries and analyze execution plans. |
|||
|
|||
### 4. Document Filter Behavior |
|||
|
|||
Clearly document which filters are applied to each entity to help developers understand the behavior: |
|||
|
|||
```csharp |
|||
/// <summary> |
|||
/// Book entity with the following global query filters: |
|||
/// - ISoftDelete: Automatically excludes soft-deleted books |
|||
/// - IMultiTenant: Automatically filters by current tenant |
|||
/// - IPublishable: Excludes unpublished books (based on IsPublished and PublishDate) |
|||
/// - IApproveable: Excludes unapproved books (based on IsApproved) |
|||
/// </summary> |
|||
/// <remarks> |
|||
/// Filter interfaces (IPublishable, IApproveable, IPublishedFilter, IApprovedFilter) |
|||
/// are defined in Step 1: Define Filter Interfaces |
|||
/// </remarks> |
|||
public class Book : AuditedAggregateRoot<Guid>, ISoftDelete, IMultiTenant, IPublishable, IApproveable |
|||
{ |
|||
public string Name { get; set; } |
|||
|
|||
public BookType Type { get; set; } |
|||
|
|||
public DateTime PublishDate { get; set; } |
|||
|
|||
public float Price { get; set; } |
|||
|
|||
public bool IsPublished { get; set; } |
|||
|
|||
public bool IsApproved { get; set; } |
|||
|
|||
public Guid? TenantId { get; set; } |
|||
|
|||
public bool IsDeleted { get; set; } |
|||
|
|||
public Guid DepartmentId { get; set; } |
|||
} |
|||
``` |
|||
|
|||
## Testing Global Query Filters |
|||
|
|||
Testing with global query filters can be challenging. Here's how to do it effectively: |
|||
|
|||
### Unit Testing Filters |
|||
|
|||
```csharp |
|||
[Fact] |
|||
public void Book_QueryFilter_Should_Filter_Unpublished() |
|||
{ |
|||
var options = new DbContextOptionsBuilder<BookStoreDbContext>() |
|||
.UseInMemoryDatabase(databaseName: "TestDb") |
|||
.Options; |
|||
|
|||
using (var context = new BookStoreDbContext(options)) |
|||
{ |
|||
context.Books.Add(new Book { Name = "Published Book", IsPublished = true }); |
|||
context.Books.Add(new Book { Name = "Unpublished Book", IsPublished = false }); |
|||
context.SaveChanges(); |
|||
} |
|||
|
|||
using (var context = new BookStoreDbContext(options)) |
|||
{ |
|||
// Query with filter enabled (default) |
|||
var publishedBooks = context.Books.ToList(); |
|||
Assert.Single(publishedBooks); |
|||
Assert.Equal("Published Book", publishedBooks[0].Name); |
|||
} |
|||
} |
|||
``` |
|||
|
|||
### Integration Testing with Filter Control |
|||
|
|||
```csharp |
|||
[Fact] |
|||
public async Task Should_Get_Deleted_Book_When_Filter_Disabled() |
|||
{ |
|||
var dataFilter = GetRequiredService<IDataFilter>(); |
|||
|
|||
// Arrange |
|||
var book = await _bookRepository.InsertAsync( |
|||
new Book { Name = "Test Book" }, |
|||
autoSave: true |
|||
); |
|||
|
|||
await _bookRepository.DeleteAsync(book); |
|||
|
|||
// Act - with filter disabled |
|||
using (dataFilter.Disable<ISoftDelete>()) |
|||
{ |
|||
var deletedBook = await _bookRepository |
|||
.FirstOrDefaultAsync(b => b.Id == book.Id); |
|||
|
|||
deletedBook.ShouldNotBeNull(); |
|||
deletedBook.IsDeleted.ShouldBeTrue(); |
|||
} |
|||
} |
|||
``` |
|||
|
|||
### Testing Custom Global Query Filters |
|||
|
|||
Here's a complete example of testing custom toggleable filters: |
|||
|
|||
```csharp |
|||
[Fact] |
|||
public async Task Should_Filter_Unpublished_Books_By_Default() |
|||
{ |
|||
// Default: filters are enabled |
|||
var result = await WithUnitOfWorkAsync(async () => |
|||
{ |
|||
var bookRepository = GetRequiredService<IRepository<Book, Guid>>(); |
|||
return await bookRepository.GetListAsync(); |
|||
}); |
|||
|
|||
// Only published and approved books should be returned |
|||
result.All(b => b.IsPublished).ShouldBeTrue(); |
|||
result.All(b => b.IsApproved).ShouldBeTrue(); |
|||
} |
|||
|
|||
[Fact] |
|||
public async Task Should_Return_All_Books_When_Filter_Disabled() |
|||
{ |
|||
var result = await WithUnitOfWorkAsync(async () => |
|||
{ |
|||
// Disable the published filter to see unpublished books |
|||
using (_publishedFilter.Disable()) |
|||
{ |
|||
var bookRepository = GetRequiredService<IRepository<Book, Guid>>(); |
|||
return await bookRepository.GetListAsync(); |
|||
} |
|||
}); |
|||
|
|||
// Should include unpublished books |
|||
result.Any(b => b.Name == "Unpublished Book").ShouldBeTrue(); |
|||
} |
|||
|
|||
[Fact] |
|||
public async Task Should_Combine_Filters_Correctly() |
|||
{ |
|||
// Test combining multiple filter disables |
|||
using (_publishedFilter.Disable()) |
|||
using (_approvedFilter.Disable()) |
|||
{ |
|||
var bookRepository = GetRequiredService<IRepository<Book, Guid>>(); |
|||
var allBooks = await bookRepository.GetListAsync(); |
|||
|
|||
// All books should be visible |
|||
allBooks.Count.ShouldBe(5); |
|||
} |
|||
} |
|||
``` |
|||
|
|||
> **Tip:** When using ABP's test base, inject `IDataFilter<IPublishedFilter>` and `IDataFilter<IApprovedFilter>` to control filters in your tests. |
|||
|
|||
## Key Takeaways |
|||
|
|||
✅ **Global query filters automatically apply filter criteria to all queries**, reducing developer error and ensuring consistent data filtering across your application. |
|||
|
|||
✅ **ABP Framework provides a sophisticated data filtering system** with built-in support for soft delete (`ISoftDelete`) and multi-tenancy (`IMultiTenant`), plus the ability to create custom filters. |
|||
|
|||
✅ **Use `IDataFilter<TFilter>` to control filters at runtime**, enabling or disabling filters as needed for specific operations. |
|||
|
|||
✅ **To make custom filters toggleable, override `ShouldFilterEntity` and `CreateFilterExpression`** in your DbContext. Using only `HasAbpQueryFilter` creates filters that are always active. |
|||
|
|||
✅ **Combine multiple filters carefully** and consider performance implications, especially with complex filter expressions. |
|||
|
|||
✅ **Leverage user-defined function (UDF) mapping** for better SQL generation and query performance, available since ABP v8.3. |
|||
|
|||
✅ **Always test filter behavior** to ensure filters work as expected in different scenarios, including edge cases. |
|||
|
|||
## Conclusion |
|||
|
|||
Global query filters are essential for building secure, well-isolated applications. ABP Framework's data filtering system provides a robust foundation that builds on EF Core's capabilities while adding convenient features like runtime filter control and UDF mapping optimization. |
|||
|
|||
By implementing multiple global query filters strategically, you can ensure data isolation, simplify your query logic, and reduce the risk of accidentally exposing unauthorized data. Remember to keep filters simple, add appropriate database indexes, and test thoroughly to maintain optimal performance. |
|||
|
|||
Start implementing global query filters in your ABP applications today to leverage automatic data filtering across all your repositories and queries. |
|||
|
|||
### See Also |
|||
|
|||
- [ABP Data Filtering Documentation](https://abp.io/docs/latest/framework/fundamentals/data-filtering) |
|||
- [EF Core Global Query Filters](https://learn.microsoft.com/en-us/ef/core/querying/filters) |
|||
- [ABP Multi-Tenancy Documentation](https://abp.io/docs/latest/framework/fundamentals/multi-tenancy) |
|||
- [Using User-defined function mapping for global filters](https://abp.io/docs/latest/framework/infrastructure/data-filtering#using-user-defined-function-mapping-for-global-filters) |
|||
|
|||
--- |
|||
|
|||
## References |
|||
|
|||
- [ABP Framework Documentation](https://docs.abp.io) |
|||
- [Entity Framework Core Documentation](https://docs.microsoft.com/en-us/ef/core/) |
|||
- [EF Core Global Query Filters](https://learn.microsoft.com/en-us/ef/core/querying/filters) |
|||
- [User-defined Function Mapping](https://learn.microsoft.com/en-us/ef/core/querying/user-defined-function-mapping) |
|||
@ -0,0 +1 @@ |
|||
Global query filters in Entity Framework Core allow automatic data filtering at the entity level. This article covers ABP Framework's data filtering system, including built-in filters (ISoftDelete, IMultiTenant), custom filter implementation, and performance optimization using user-defined functions. |
|||
@ -0,0 +1,50 @@ |
|||
|
|||
The software development world converged on the **Queen Elizabeth II Centre** in Westminster from **January 26-30** for **NDC London 2026**. As one of the most anticipated tech conferences in Europe, this year’s event delivered a masterclass in the future of the stack. |
|||
|
|||
We have spent five days immersed in workshops and sessions. Here is our comprehensive recap of the highlights and the technical shifts that will define 2026\. |
|||
|
|||
 |
|||
|
|||
## **1\. High-Performance .NET and C\# Evolution** |
|||
|
|||
A major focus this year was the continued evolution of the .NET ecosystem. Experts delivered standout sessions on high-performance coding patterns, and it’s clear that efficiency and "Native AOT" (Ahead-of-Time compilation) are no longer niche topics — they are becoming industry standards.
|||
|
|||
## **2\. Moving Beyond the AI Hype**
|||
|
|||
If 2025 was about experimenting with LLMs, NDC London 2026 was about AI integration. Sessions from experts showcased how developers are moving past simple chatbots and integrating AI directly into the CI/CD pipeline and automated testing suites. |
|||
|
|||
 |
|||
|
|||
 |
|||
|
|||
## **3\. The "Hallway Track" and Community Networking** |
|||
|
|||
One of the biggest draws of **NDC London** is the community. Between the 100+ sessions, the exhibitor hall was buzzing with live demos and networking. |
|||
|
|||
Watch the video: |
|||
|
|||
[](https://www.youtube.com/watch?v=yb-FILkqL7U) |
|||
|
|||
 |
|||
 |
|||
|
|||
## **4\. The Big Giveaway: Our Xbox Series S Raffle** |
|||
|
|||
One of our favorite moments of the week was our Raffle Session. We love giving back to the community that inspires us, and this year, the energy at our booth was higher than ever. |
|||
|
|||
We were thrilled to give away a brand-new Xbox Series S to one lucky winner\! It was fantastic to meet so many of you who stopped by to enter, chat about your current projects, and share your thoughts on the future of the industry. |
|||
|
|||
**Congratulations again to our 2026 winner\!** We hope you enjoy some well-deserved gaming time after a long week of learning. |
|||
|
|||
 |
|||
|
|||
Watch the video: |
|||
|
|||
[](https://www.youtube.com/watch?v=W5HRwys8dpE) |
|||
|
|||
|
|||
## **Final Thoughts: See You at NDC London 2027\!** |
|||
|
|||
NDC London 2026 proved once again why it is a cornerstone event for the global developer community. We are returning to our projects with a refreshed roadmap and a deeper understanding of the tools shaping our industry. |
|||
|
|||
 |
|||
|
After Width: | Height: | Size: 4.0 MiB |
|
After Width: | Height: | Size: 3.9 MiB |
|
After Width: | Height: | Size: 4.8 MiB |
|
After Width: | Height: | Size: 3.7 MiB |
|
After Width: | Height: | Size: 3.2 MiB |
|
After Width: | Height: | Size: 2.6 MiB |
|
After Width: | Height: | Size: 3.0 MiB |
|
After Width: | Height: | Size: 3.0 MiB |
|
After Width: | Height: | Size: 644 KiB |
|
After Width: | Height: | Size: 600 KiB |
@ -0,0 +1,325 @@ |
|||
 |
|||
|
|||
This year we attended NDC London as a sponsor for [ABP](https://abp.io). The conference was held at the same place, [Queen Elizabeth II](https://qeiicentre.london/), as in previous years. I guess this is the best conf for .NET developers around the world (thanks to the NDC team), and we have attended for the last 5 years. It was 3 full days, from 28 to 30 January 2026. As an exhibitor, we talked a lot with the attendees who stopped by our booth, while we were eating, or in the conf rooms.
|||
|
|||
This is the best opportunity to learn what everyone is doing in the software community. While I was explaining ABP to people who were hearing about it for the first time, I also asked what they do in their work. Developers mostly work on web platforms. And as you know, there's an AI transformation in our sector. That's why I wondered whether other people also follow the latest AI trend! Well... not as much as I expected. At Volosoft, we are tightly following AI trends, using them in our daily development, injecting this new technology into our product, and trying to benefit from it as much as possible.
|||
|
|||
 |
|||
|
|||
This new AI trend is the same as the invention of printing (by Johannes Gutenberg in 1450) or the invention of calculators (by William S. Burroughs in 1886). The countries that benefited from these inventions got a huge increase in their welfare level. So, we welcome this new AI invention in software development, design, DevOps and testing. I also see this as a big wave in the ocean: if you are prepared and develop your skills, you can play with it 🌊 — it's called surfing — or you'll drown under the AI wave in this ocean. But not all companies react to this transformation quickly. Many developers use it like a ChatGPT conversation (copy-pasting from it) or use GitHub Copilot in a limited manner. But as I heard from Steven Sanderson's session and other Microsoft employees, they are already using it to reproduce bugs reported in issues or even to create feature PRs via Copilot. That's good!
|||
|
|||
Here're some pictures from the conf and that's me on the left side with brown shoes :) |
|||
|
|||
 |
|||
|
|||
Another thing I noticed: there's a decrease in the number of attendees. I don't know the real reason, but probably IT companies cut their budgets for conferences. As you may have heard, many companies are laying people off because AI is replacing some positions.
|||
|
|||
The food was great during the conference. It was more like eating sessions for me. Lots of good meals from different countries' kitchen. In the second day, there was a party. People grabbed their beers, wines, beverages and did some networking. |
|||
|
|||
I was expecting more AI-oriented sessions, but there were fewer than I expected. Even though I was an exhibitor, I tried to attend some of the sessions. I'll share my notes.
|||
|
|||
--- |
|||
|
|||
Here's a quick video from the exhibitors' area on the 3rd floor and our ABP booth's Xbox raffle: |
|||
|
|||
**Video 1: NDC Conference 2026 Environment** 👉 [https://youtu.be/U1kiYG12KgA](https://youtu.be/U1kiYG12KgA) |
|||
|
|||
[](https://youtu.be/U1kiYG12KgA) |
|||
|
|||
|
|||
**Video 2: Our raffle for XBOX** 👉 [https://youtu.be/7o0WX70qYw0](https://youtu.be/7o0WX70qYw0) |
|||
[](https://youtu.be/7o0WX70qYw0) |
|||
|
|||
--- |
|||
|
|||
|
|||
## Sessions / Talks |
|||
|
|||
### The Dangers of Probably-Working Software | Damian Brady |
|||
|
|||
 |
|||
|
|||
The first session and keynote was from Damian Brady. He's part of the Developer Advocacy team at GitHub, and the topic was "The dangers of probably-working software". He started with the negative impact of how generative AI is harming software, and he ended on the note that it's not so bad — we can benefit from the AI transformation. It was the first time I heard the term "sleepwalking" used for development. He said that when we generate code via AI and don't review it well enough, we're sleepwalkers. And that's correct — a good analogy for this case. This talk centers on a powerful lesson: *“**Don’t ship code you don’t truly understand.**”*
|||
Damian tells a personal story from his early .NET days when he implemented a **Huffman compression algorithm** based largely on Wikipedia. The code **“worked” in small tests** but **failed in production**. The experience forced him to deeply understand the algorithm rather than relying on copied solutions. Through this story, he explores themes of trust, complexity, testing, and mental models in software engineering. |
|||
|
|||
#### Notes From This Session |
|||
|
|||
- “It seems to work” is not the same as “I understand it.” |
|||
- Code copied from Wikipedia or StackOverflow or AI platforms is inherently risky in production. |
|||
- Passing tests on small datasets does not guarantee real-world reliability (happy path ~= unhappy results) |
|||
- Performance issues often surface only in edge cases. |
|||
- Delivery pressure can discourage deep understanding — to the detriment of quality. |
|||
- Always ask: “**When does this fail?**” — not just “**Why does this work?**” |
|||
|
|||
--- |
|||
|
|||
|
|||
|
|||
### Playing The Long Game | Sheena O'Connell |
|||
|
|||
 |
|||
|
|||
Sheena is a former software engineer who now trains and supports tech educators. She talks about AI tools... |
|||
AI tools are everywhere but poorly understood; there’s hype, risks, and mixed results. The key question is how individuals and organisations should play the long game (long-term strategy) so skilled human engineers—especially juniors—can still grow and thrive. |
|||
She showed some statistics about how job postings on the Indeed platform are dramatically decreasing for software developers. About AI-generated code, she says it's less secure, there might be logical problems or interesting bugs, humans might not read the code very well, and understanding/debugging it might sometimes take much longer.
|||
|
|||
Being an engineer is about much more than a job title — it requires systems thinking, clear communication, dealing with uncertainty, continuous learning, discipline, and good knowledge management. The job market is shifting: demand for AI-skilled workers is rising quickly and paying premiums, and required skills are changing faster in AI-exposed roles. There’s strength in using a diversity of models instead of locking into one provider, and guardrails improve reliability. |
|||
|
|||
AI is creating new roles (like AI security, observability, and operations) and new kinds of work, while routine attrition also opens opportunities. At the same time, heavy AI use can have negative cognitive effects: people may think less, feel lonelier, and prefer talking to AI over humans. |
|||
|
|||
Organizations are becoming more dynamic and project-based, with shorter planning cycles, higher trust, and more experimentation — but also risk of “shiny new toy” syndrome. Research shows AI can boost productivity by 15–20% in many cases, especially in simpler, greenfield projects and popular languages, but it can actually reduce productivity on very complex work. Overall, the recommendation is to focus on using AI well (not just the newest model), add monitoring and guardrails, keep flexibility, and build tools that allow safe experimentation. |
|||
|
|||
 |
|||
|
|||
We’re in a messy, fast-moving AI era where LLM tools are everywhere but poorly understood. There’s a lot of hype and marketing noise, making it hard even for technical people to separate reality from fantasy. Different archetypes have emerged — from AI-optimists to skeptics — and both extremes have risks. AI is great for quick prototyping but unreliable for complex work, so teams need guardrails, better practices, and a focus on learning rather than “writing more code faster.” The key question is how individuals and organizations can play the long game so strong human engineers — especially juniors — can still grow and thrive in an AI-driven world. |
|||
|
|||
 |
|||
|
|||
--- |
|||
|
|||
### Crafting Intelligent Agents with Context Engineering | Carly Richmond |
|||
|
|||
 |
|||
|
|||
Carly is a Developer Advocate Lead at Elastic in London with deep experience in web development and agile delivery from her years in investment banking. A practical UI engineer, she brings a clear, hands-on perspective to building real-world AI systems. In her talk on **“Crafting Intelligent Agents with Context Engineering,”** she argues that prompt engineering isn’t enough — and shows how carefully shaping context across data, tools, and systems is key to creating reliable, useful AI agents. She talked about the context of an AI process: the context consists of Instructions, Short Memory, Long Memory, RAG, User Prompts, Tools, and Structured Output.
|||
|
|||
--- |
|||
|
|||
|
|||
|
|||
### Modular Monoliths | Kevlin Henney |
|||
|
|||
 |
|||
|
|||
Kevlin frames the “microservices vs monolith” debate as a false dichotomy. His core argument is simple but powerful: problems rarely come from *being a monolith* — they come from being a **poorly structured one**. Modularity is not a deployment choice; it is an architectural discipline. |
|||
|
|||
#### **Notes from the Talk** |
|||
|
|||
- A monolith is not inherently bad; a tangled (intertwined, complex) monolith is. |
|||
- Architecture is mostly about **boundaries**, not boxes. |
|||
- If you cannot draw clean internal boundaries, you are not ready for microservices. |
|||
- Dependencies reveal your real architecture better than diagrams. |
|||
- Teams shape systems more than tools do. |
|||
- Splitting systems prematurely increases complexity without increasing clarity. |
|||
- Good modular design makes systems **easier to change, not just easier to scale**. |
|||
|
|||
#### **So As a Developer;** |
|||
|
|||
- Start with a well-structured modular monolith before considering microservices. |
|||
- Treat modules as real first-class citizens: clear ownership, clear contracts. |
|||
- Make dependency direction explicit — no circular graphs. |
|||
- Use internal architectural tests to prevent boundary violations. |
|||
- Organize code by *capability*, not by technical layer. |
|||
- If your team structure is messy, your architecture will be messy — fix people, not tech. |
|||
|
|||
--- |
|||
|
|||
### AI Coding Agents & Skills | Steve Sanderson |
|||
|
|||
**Being productive with AI Agents** |
|||
|
|||
 |
|||
|
|||
In this session, Steve started with how Microsoft is extensively using AI tools for PRs, reproducing bug reports, etc... He's now working on the **GitHub Copilot Coding Agent Runtime Team**. He says we use our brains and hands less than ever.
|||
|
|||
 |
|||
|
|||
**In 1 Week 293 PRs Opened by the help of AI** |
|||
|
|||
 |
|||
|
|||
**He created a new feature to Copilot with the help of Copilot in minutes** |
|||
|
|||
 |
|||
|
|||
> Code is cheap! Prototypes are almost free! |
|||
|
|||
And he summarized the AI assisted development into 10 outlines. These are Subagents, Plan Mode, Skills, Delegate, Memories, Hooks, MCP, Infinite Sessions, Plugins and Git Workflow. Let's see his statements for each of these headings: |
|||
|
|||
#### **1. Subagents** |
|||
|
|||
 |
|||
|
|||
- Break big problems into smaller, specialized agents. |
|||
- Each subagent should have a clear responsibility and limited scope. |
|||
- Parallel work is better than one “smart but slow” agent. |
|||
- Reduces hallucination by narrowing context per agent. |
|||
- Easier to debug: you can inspect each agent’s output separately. |
|||
|
|||
|
|||
------ |
|||
|
|||
#### **2. Plan Mode** |
|||
|
|||
 |
|||
|
|||
- Always start with a plan before generating code. |
|||
- The plan should be explicit, human-readable, and reviewable. |
|||
- You'll align your expectations with the AI's next steps. |
|||
- Prevents wasted effort on wrong directions. |
|||
- Encourages structured thinking instead of trial-and-error coding. |
|||
|
|||
------ |
|||
|
|||
#### **3. Skills** |
|||
|
|||
 |
|||
|
|||
- These are just Markdown files (but they can also be tools or scripts)
|||
- Skills are reusable capabilities for AI agents. |
|||
- You cannot just give all the info (as Markdown) to the AI context (limited!), skills are being used when necessary (by their Description field) |
|||
- Treat skills like APIs: versioned, documented, and shareable. |
|||
- Prefer many small skills over one big skill set. |
|||
- Store skills in Git, not in chat history. |
|||
- Skills should integrate with real tools (CI, GitHub, browsers, etc.). |
|||
|
|||
#### 3.1 Skill > Test Your Project Skill |
|||
|
|||
 |
|||
|
|||
------ |
|||
|
|||
#### **4. Delegate** |
|||
|
|||
> didn't mention much about this topic |
|||
|
|||
- “Delegate” refers to **offloading local work to the cloud**. |
|||
- Using remote computers for AI work instead of your local resources (the agent continues the task remotely)
|||
|
|||
##### **Ralph Force Do While Over and Over Until It Finishes** |
|||
|
|||
https://awesomeclaude.ai/ralph-wiggum |
|||
|
|||
> Who knows how many tokens it uses :)
|||
|
|||
 |
|||
|
|||
------ |
|||
|
|||
#### **5. Memories** |
|||
|
|||
> didn't mention much about this topic |
|||
|
|||
- It's like saying "don't write tests like this, write them like that" — and the AI will remember it across your team members.
|||
|
|||
- Copilot Memory allows Copilot to learn about your codebase, helping Copilot coding agent, Copilot code review, and Copilot CLI to work more effectively in a repository. |
|||
|
|||
- Treat memory like documentation that evolves over time. |
|||
|
|||
- Copilot Memory is **turned off by default** |
|||
|
|||
- https://docs.github.com/en/copilot/how-tos/use-copilot-agents/copilot-memory |
|||
|
|||
|
|||
|
|||
------ |
|||
|
|||
#### **6. Hooks** |
|||
|
|||
> didn't mention much about this topic |
|||
|
|||
 |
|||
|
|||
- Execute custom shell commands at key points during agent execution. |
|||
- Examples: pre-commit checks, PR reviews, test triggers. |
|||
- Hooks make AI proactive instead of reactive. |
|||
- They reduce manual context switching for developers. |
|||
- https://docs.github.com/en/copilot/how-tos/use-copilot-agents/coding-agent/use-hooks |
|||
|
|||
------ |
|||
|
|||
#### **7. MCP** |
|||
|
|||
- Talk to external tools. |
|||
|
|||
- Enables safe, controlled access to systems (files, APIs, databases). |
|||
|
|||
- Prevents random tool usage; everything is explicit. |
|||
|
|||
|
|||
|
|||
------ |
|||
|
|||
#### **8. Infinite Sessions** |
|||
|
|||
 |
|||
|
|||
- AI should remember the “project context,” not just the last message. |
|||
- Reduces repetition and re-explaining. |
|||
- Enables deeper reasoning over time. |
|||
- Memory + skills + hooks together make “infinite sessions” possible. |
|||
- https://docs.github.com/en/copilot/how-tos/copilot-cli/cli-best-practices#3-leverage-infinite-sessions |
|||
|
|||
------ |
|||
|
|||
#### **9. Plugins** |
|||
|
|||
 |
|||
|
|||
- Extend AI capabilities beyond core model features. |
|||
- https://github.com/marketplace?type=apps&copilot_app=true |
|||
|
|||
------ |
|||
|
|||
#### **10. Git Workflow** |
|||
|
|||
- AI should operate inside your existing Git process. |
|||
- Generate small, focused commits — not giant changes. |
|||
- Use AI for PR descriptions and code reviews. |
|||
- Keep humans in the loop for design decisions. |
|||
- Branching strategy still matters; AI doesn’t replace it. |
|||
- Treat AI like a junior teammate: helpful, but needs supervision. |
|||
- CI + tests remain your primary safety net, not the model. |
|||
- Keep feedback loops fast: generate → test → review → refine. |
|||
|
|||
**Copilot as SDK** |
|||
|
|||
You can wrap GitHub CoPilot into your app as below: |
|||
|
|||
 |
|||
|
|||
#### **As a Developer What You Need to Get from Steve's Talk;** |
|||
|
|||
- Coding agents work best when you treat them like programmable teammates, not autocomplete tools. |
|||
- “Skills” are the right abstraction for scaling AI assistants across a team. |
|||
- Treat skills like shared APIs: version them, review them, and store them in source control. |
|||
- Skills can be installed from Git repos (marketplaces), not just created locally. |
|||
- Slash commands make skills fast, explicit, and reproducible in daily workflow. |
|||
- Use skills to bridge AI ↔ real systems (e.g., GitHub Actions, Playwright, build status). |
|||
- Automation skills are most valuable when they handle end-to-end flows (browser + app + data). |
|||
- Let the agent *discover* the right skill rather than hard-coding every step. |
|||
- Skills reduce hallucination risk by constraining what the agent is allowed to do. |
|||
|
|||
--- |
|||
|
|||
### My Personal Notes about AI |
|||
|
|||
- This is your code tech stack for a basic .NET project: |
|||
|
|||
- Assembly > MSIL > C# > ASP.NET Core > ...ABP... > NuGet + NPM > Your Handmade Business Code
|||
|
|||
When we ask an AI-assisted IDE for a development task, the AI never starts from Assembly, and it doesn't even rewrite an existing NPM package. It basically uses what's already on the market. So we know frameworks like ASP.NET Core and ABP will always be there after the AI evolution.
|||
|
|||
- Software engineer is not just writing correct syntax code to explain a program to computer. As an engineer you need to understand the requirements, design the problem, make proper decisions and fix the uncertainty. Asking AI the right questions is very critical these days. |
|||
|
|||
- Tesla cars have already started to drive autonomously. As a driver, you don't need to care about how the car is driven. You need to choose the right way to go in the shortest time without hassle.
|||
|
|||
- I talked with other software company owners; they also say their docs website visits are down. I talked to another guy who makes video tutorials for Pluralsight, and he says learning from video is decreasing nowadays...
|||
|
|||
- Nowadays, **developers' big new issue is reviewing AI-generated code.** In the future, developers who use AI, who inspect AI-generated code well, and who tell the AI exactly what's needed will be the most valuable. Others (who only type code) will naturally be eliminated. Invest your time in these topics.
|||
|
|||
- We see that our brain is getting lazier and our coding muscles get weaker day by day. Just like after the invention of the calculator, we stopped calculating big numbers. We'll eventually forget coding. But maybe that's what it needs to be!
|||
|
|||
- Also, I don't think AI will replace developers. Think about washing machines: since they came out, they still need humans to put the clothes in the machine, pick the best program, take them out of the machine, and iron. From now on, AI is our assistant in every aspect of our life, from shopping, medical issues, and learning, to coding. Let's benefit from it.
|||
|
|||
|
|||
|
|||
#### Software and service stocks shed $830 billion in market value in six trading days |
|||
|
|||
Software stocks fall on AI disruption fears on Feb 4, 2026 in NASDAQ. Software and service stocks shed $830 billion in market value in six trading days. Scramble to shield portfolios as AI muddies valuations, business prospects. |
|||
|
|||
|
|||
|
|||
 |
|||
|
|||
**We need to be well prepared for this war.** |
|||
|
After Width: | Height: | Size: 2.2 MiB |
|
After Width: | Height: | Size: 495 KiB |
|
After Width: | Height: | Size: 155 KiB |
|
After Width: | Height: | Size: 430 KiB |
|
After Width: | Height: | Size: 1.9 MiB |
|
After Width: | Height: | Size: 348 KiB |
|
After Width: | Height: | Size: 30 KiB |
|
After Width: | Height: | Size: 34 KiB |
|
After Width: | Height: | Size: 142 KiB |
|
After Width: | Height: | Size: 203 KiB |
|
After Width: | Height: | Size: 315 KiB |
|
After Width: | Height: | Size: 477 KiB |
|
After Width: | Height: | Size: 81 KiB |
|
After Width: | Height: | Size: 260 KiB |
|
After Width: | Height: | Size: 631 KiB |
|
After Width: | Height: | Size: 1.8 MiB |
|
After Width: | Height: | Size: 903 KiB |
|
After Width: | Height: | Size: 300 KiB |
|
After Width: | Height: | Size: 355 KiB |
|
After Width: | Height: | Size: 471 KiB |
|
After Width: | Height: | Size: 56 KiB |
|
After Width: | Height: | Size: 3.7 KiB |
|
After Width: | Height: | Size: 41 KiB |
|
After Width: | Height: | Size: 19 KiB |
@ -0,0 +1,488 @@ |
|||
# Using OpenAI's Moderation API in an ABP Application with the AI Management Module |
|||
|
|||
If your application accepts user-generated content (comments, reviews, forum posts) you likely need some form of content moderation. Building one from scratch typically means training ML models, maintaining datasets, and writing a lot of code. OpenAI's `omni-moderation-latest` model offers a practical shortcut: it's free, requires no training data, and covers 13+ harm categories across text and images in 40+ languages. |
|||
|
|||
In this article, I'll show you how to integrate this model into an ABP application using the [**AI Management Module**](https://abp.io/docs/latest/modules/ai-management). We'll wire it into the [CMS Kit Module's Comment Feature](https://abp.io/docs/latest/modules/cms-kit/comments) so every comment is automatically screened before it's published. The **AI Management Module** handles the OpenAI configuration (API keys, model selection, etc.) through a runtime UI, so you won't need to hardcode any of that into your `appsettings.json` or redeploy when something changes. |
|||
|
|||
By the end, you'll have a working content moderation pipeline you can adapt for any entity in your ABP project. |
|||
|
|||
## Understanding OpenAI's Omni-Moderation Model |
|||
|
|||
Before diving into the implementation, let's understand what makes OpenAI's `omni-moderation-latest` model a game-changer for content moderation. |
|||
|
|||
### What is it? |
|||
|
|||
OpenAI's `omni-moderation-latest` is a next-generation multimodal content moderation model built on the foundation of GPT-4o. Released in September 2024, this model represents a significant leap forward in automated content moderation capabilities. |
|||
|
|||
The most remarkable aspect? **It's completely free to use** through OpenAI's Moderation API — there are no token costs, no usage limits for reasonable use cases, and no hidden fees.
|||
|
|||
### Key Capabilities |
|||
|
|||
The **omni-moderation** model offers several compelling features that make it ideal for production applications: |
|||
|
|||
- **Multimodal Understanding**: Unlike text-only moderation systems, this model *can process both text and image inputs*, making it suitable for applications where users can upload images alongside their comments or posts. |
|||
- **High Accuracy**: Built on GPT-4o's advanced understanding capabilities, the model achieves significantly higher accuracy in detecting nuanced harmful content compared to rule-based systems or simpler ML models. |
|||
- **Multilingual Support**: The model demonstrates enhanced performance across more than 40 languages, making it suitable for global applications without requiring separate moderation systems for each language. |
|||
- **Comprehensive Category Coverage**: Rather than just detecting "spam" or "not spam," the model classifies content across 13+ distinct categories of potentially harmful content. |
|||
|
|||
### Content Categories |
|||
|
|||
The model evaluates content against the following categories, each designed to catch specific types of harmful content: |
|||
|
|||
| Category | What It Detects | |
|||
|----------|-----------------| |
|||
| `harassment` | Content that expresses, incites, or promotes harassing language towards any individual or group | |
|||
| `harassment/threatening` | Harassment content that additionally includes threats of violence or serious harm | |
|||
| `hate` | Content that promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability, or caste | |
|||
| `hate/threatening` | Hateful content that includes threats of violence or serious harm towards the targeted group | |
|||
| `self-harm` | Content that promotes, encourages, or depicts acts of self-harm such as suicide, cutting, or eating disorders | |
|||
| `self-harm/intent` | Content where the speaker expresses intent to engage in self-harm | |
|||
| `self-harm/instructions` | Content that provides instructions or advice on how to commit acts of self-harm | |
|||
| `sexual` | Content meant to arouse sexual excitement, including descriptions of sexual activity or promotion of sexual services | |
|||
| `sexual/minors` | Sexual content that involves individuals under 18 years of age | |
|||
| `violence` | Content that depicts death, violence, or physical injury in graphic detail | |
|||
| `violence/graphic` | Content depicting violence or physical injury in extremely graphic, disturbing detail | |
|||
| `illicit` | Content that provides advice or instructions for committing illegal activities | |
|||
| `illicit/violent` | Illicit content that specifically involves violence or weapons | |
|||
|
|||
### API Response Structure |
|||
|
|||
When you send content to the Moderation API (through the model or directly via the API), you receive a structured response containing:
|||
|
|||
- **`flagged`**: A boolean indicating whether the content violates any of OpenAI's usage policies. This is your primary indicator for whether to block content. |
|||
- **`categories`**: A dictionary containing boolean flags for each category, telling you exactly which policies were violated. |
|||
- **`category_scores`**: Confidence scores ranging from 0 to 1 for each category, allowing you to implement custom thresholds if needed. |
|||
- **`category_applied_input_types`**: A dictionary containing information on which input types were flagged for each category. For example, if both the image and text inputs to the model are flagged for "violence/graphic", the `violence/graphic` property will be set to `["image", "text"]`. This is only available on omni models. |
|||
|
|||
> For more detailed information about the model's capabilities and best practices, refer to the [OpenAI Moderation Guide](https://platform.openai.com/docs/guides/moderation). |
|||
|
|||
## The AI Management Module: Your Dynamic AI Configuration Hub |
|||
|
|||
The [AI Management Module](https://abp.io/docs/latest/modules/ai-management) is a powerful addition to the ABP Platform that transforms how you integrate and manage AI capabilities in your applications. Built on top of the [ABP Framework's AI infrastructure](https://abp.io/docs/latest/framework/infrastructure/artificial-intelligence), it provides a complete solution for managing AI workspaces dynamically—without requiring code changes or application redeployment. |
|||
|
|||
### Why Use the AI Management Module? |
|||
|
|||
Traditional AI integrations often suffer from several pain points: |
|||
|
|||
1. **Hardcoded Configuration**: API keys, model names, and endpoints are typically stored in configuration files, requiring redeployment for any changes. |
|||
2. **No Runtime Flexibility**: Switching between AI providers or models requires code changes. |
|||
3. **Security Concerns**: Managing API keys across environments is cumbersome and error-prone. |
|||
4. **Limited Visibility**: There's no easy way to see which AI configurations are active or test them without writing code. |
|||
|
|||
The AI Management Module addresses all these concerns by providing: |
|||
|
|||
- **Dynamic Workspace Management**: Create, configure, and update AI workspaces directly from a user-friendly administrative interface—no code changes required. |
|||
- **Provider Flexibility**: Seamlessly switch between different AI providers (OpenAI, Gemini, Anthropic, Azure OpenAI, Ollama, and custom providers) without modifying your application code.
|||
- **Built-in Testing**: Test your AI configurations immediately using the included chat interface playground before deploying to production. |
|||
- **Permission-Based Access Control**: Define granular permissions to control who can manage AI workspaces and who can use specific AI features. |
|||
- **Multi-Framework Support**: Full support for MVC/Razor Pages, Blazor (Server & WebAssembly), and Angular UI frameworks. |
|||
|
|||
### Built-in Provider Support |
|||
|
|||
The **AI Management Module** comes with built-in support for popular AI providers through dedicated NuGet packages: |
|||
|
|||
- **`Volo.AIManagement.OpenAI`**: Provides seamless integration with OpenAI's APIs, including GPT models and the *Moderation API*. |
|||
- Custom providers can be added by implementing the `IChatClientFactory` interface. (If you configured Ollama while creating your project, you can see an example implementation for it.)
|||
|
|||
## Building the Demo Application |
|||
|
|||
Now let's put theory into practice by building a complete content moderation system. We'll create an ABP application with the **AI Management Module**, configure OpenAI as our provider, set up the CMS Kit Comment Feature, and implement automatic content moderation for all user comments. |
|||
|
|||
### Step 1: Creating an Application with AI Management Module |
|||
|
|||
> In this tutorial, I'll create a **layered MVC application** named **ContentModeration**. If you already have an existing solution, you can follow along by replacing the namespaces accordingly. Otherwise, feel free to follow the solution creation steps below. |
|||
|
|||
The most straightforward way to create an application with the AI Management Module is through **ABP Studio**. When you create a new project, you'll encounter an **AI Integration** step in the project creation wizard. This wizard allows you to: |
|||
|
|||
- Enable the AI Management Module with a single checkbox |
|||
- Configure your preferred AI provider (OpenAI and Ollama) |
|||
- Set up initial workspace configurations |
|||
- Automatically install all required NuGet packages |
|||
|
|||
> **Note:** The AI Integration tab in ABP Studio currently only supports the **MVC/Razor Pages** UI. Support for **Angular** and **Blazor** UIs will be added in upcoming versions. |
|||
|
|||
 |
|||
|
|||
During the wizard, select **OpenAI** as your AI provider, set the model name as `omni-moderation-latest` and provide your API key. The wizard will automatically: |
|||
|
|||
1. Install the `Volo.AIManagement.*` packages across your solution |
|||
2. Install the `Volo.AIManagement.OpenAI` package for OpenAI provider support (you can use any OpenAI compatible model here, including Gemini, Claude and GPT models) |
|||
3. Configure the necessary module dependencies |
|||
4. Set up initial database migrations |
|||
|
|||
**Alternative Installation Method:** |
|||
|
|||
If you have an existing project or prefer manual installation, you can add the module using the ABP CLI: |
|||
|
|||
```bash |
|||
abp add-module Volo.AIManagement |
|||
``` |
|||
|
|||
Or through ABP Studio by right-clicking on your solution, selecting **Import Module**, and choosing `Volo.AIManagement` from the NuGet tab. |
|||
|
|||
### Step 2: Understanding the OpenAI Workspace Configuration |
|||
|
|||
After creating your project and running the application for the first time, navigate to **AI Management > Workspaces** in the admin menu. Here you'll find the workspace management interface where you can view, create, and modify AI workspaces. |
|||
|
|||
 |
|||
|
|||
If you configured OpenAI during the project creation wizard, you'll already have a workspace set up. Otherwise, you can create a new workspace with the following configuration: |
|||
|
|||
| Property | Value | Description | |
|||
|----------|-------|-------------| |
|||
| **Name** | `OpenAIAssistant` | A unique identifier for this workspace (no spaces allowed) | |
|||
| **Provider** | `OpenAI` | The AI provider to use | |
|||
| **Model** | `omni-moderation-latest` | The specific model for content moderation | |
|||
| **API Key** | `<Your-OpenAI-API-key>` | Authentication credential for the OpenAI API | |
|||
| **Description** | `Workspace for content moderation` | A helpful description for administrators | |
|||
|
|||
The beauty of this approach is that you can modify any of these settings at runtime through the UI. Need to rotate your API key? Just update it in the workspace configuration. Want to test a different model? Change it without touching your code. |
|||
|
|||
### Step 3: Setting Up the CMS Kit Comment Feature |
|||
|
|||
Now let's add the CMS Kit Module to enable the Comment Feature. The CMS Kit provides a robust, production-ready commenting system that we'll enhance with our content moderation. |
|||
|
|||
**Install the CMS Kit Module:** |
|||
|
|||
Run the following command in your solution directory: |
|||
|
|||
```bash |
|||
abp add-module Volo.CmsKit --skip-db-migrations |
|||
``` |
|||
|
|||
> Also, you can add the related module through ABP Studio UI. |
|||
|
|||
**Enable the Comment Feature:** |
|||
|
|||
By default, CMS Kit features are disabled to keep your application lean. Open the `GlobalFeatureConfigurator` class in your `*.Domain.Shared` project and enable the Comment Feature: |
|||
|
|||
```csharp |
|||
using Volo.Abp.GlobalFeatures; |
|||
using Volo.Abp.Threading; |
|||
|
|||
namespace ContentModeration; |
|||
|
|||
public static class ContentModerationGlobalFeatureConfigurator |
|||
{ |
|||
private static readonly OneTimeRunner OneTimeRunner = new OneTimeRunner(); |
|||
|
|||
public static void Configure() |
|||
{ |
|||
OneTimeRunner.Run(() => |
|||
{ |
|||
GlobalFeatureManager.Instance.Modules.CmsKit(cmsKit => |
|||
{ |
|||
//only enable the Comment Feature |
|||
cmsKit.Comments.Enable(); |
|||
}); |
|||
}); |
|||
} |
|||
} |
|||
``` |
|||
|
|||
**Configure the Comment Entity Types:** |
|||
|
|||
Open your `*DomainModule` class and configure which entity types can have comments. For our demo, we'll enable comments on "Article" entities: |
|||
|
|||
```csharp |
|||
using Volo.CmsKit.Comments; |
|||
|
|||
// In your ConfigureServices method: |
|||
Configure<CmsKitCommentOptions>(options => |
|||
{ |
|||
options.EntityTypes.Add(new CommentEntityTypeDefinition("Article")); |
|||
}); |
|||
``` |
|||
|
|||
**Add the Comment Component to a Page:** |
|||
|
|||
Finally, let's add the commenting interface to a page. Open the `Index.cshtml` file in your `*.Web` project and add the Comment component (replace with the following content): |
|||
|
|||
```html |
|||
@page |
|||
@using Volo.CmsKit.Public.Web.Pages.CmsKit.Shared.Components.Commenting |
|||
@model ContentModeration.Web.Pages.IndexModel |
|||
|
|||
<div class="container mt-4"> |
|||
<div class="card"> |
|||
<div class="card-header"> |
|||
<h3>Welcome to Our Community</h3> |
|||
</div> |
|||
<div class="card-body"> |
|||
<p class="lead"> |
|||
Share your thoughts in the comments below. Our AI-powered moderation system |
|||
automatically reviews all comments to ensure a safe and respectful environment |
|||
for everyone. |
|||
</p> |
|||
|
|||
<hr/> |
|||
|
|||
<h4>Comments</h4> |
|||
@await Component.InvokeAsync(typeof(CommentingViewComponent), new |
|||
{ |
|||
entityType = "Article", |
|||
entityId = "welcome-article", |
|||
isReadOnly = false |
|||
}) |
|||
</div> |
|||
</div> |
|||
</div> |
|||
``` |
|||
|
|||
At this point, you have a fully functional commenting system. Users can post comments, reply to existing comments, and interact with the community. |
|||
|
|||
 |
|||
|
|||
However, there's no content moderation yet and any content, including harmful content, would be accepted. Let's fix that! |
|||
|
|||
## Implementing the Content Moderation Service |
|||
|
|||
**Now comes the exciting part:** implementing the content moderation service that leverages OpenAI's `omni-moderation` model to automatically screen all comments before they're published. |
|||
|
|||
### Understanding the Architecture |
|||
|
|||
Our implementation follows a clean, modular architecture: |
|||
|
|||
1. **`IContentModerator` Interface**: Defines the contract for content moderation, making our implementation testable and replaceable. |
|||
2. **`ContentModerator` Service**: The concrete implementation that calls OpenAI's Moderation API using the configuration from the AI Management Module. |
|||
3. **`MyCommentAppService`**: An override of the CMS Kit's comment service that integrates our moderation logic. |
|||
|
|||
This separation of concerns ensures that: |
|||
|
|||
- The moderation logic is isolated and can be unit tested independently |
|||
- You can easily swap the moderation implementation (e.g., switch to a different provider) |
|||
- The integration with CMS Kit is clean and maintainable |
|||
|
|||
### Creating the Content Moderator Interface |
|||
|
|||
First, let's define the interface in your `*.Application.Contracts` project. This interface is intentionally simple: it takes text input and throws an exception if the content is harmful:
|||
|
|||
```csharp |
|||
using System.Threading.Tasks; |
|||
|
|||
namespace ContentModeration.Moderation; |
|||
|
|||
public interface IContentModerator |
|||
{ |
|||
Task CheckAsync(string text); |
|||
} |
|||
``` |
|||
|
|||
### Implementing the Content Moderator Service |
|||
|
|||
Now let's implement the service in your `*.Application` project. This implementation uses the `IWorkspaceConfigurationStore` from the AI Management Module to dynamically retrieve the OpenAI configuration: |
|||
|
|||
```csharp |
|||
using System.Collections.Generic; |
|||
using System.Threading.Tasks; |
|||
using OpenAI.Moderations; |
|||
using Volo.Abp; |
|||
using Volo.Abp.DependencyInjection; |
|||
using Volo.AIManagement.Workspaces.Configuration; |
|||
|
|||
namespace ContentModeration.Moderation; |
|||
|
|||
public class ContentModerator : IContentModerator, ITransientDependency |
|||
{ |
|||
private readonly IWorkspaceConfigurationStore _workspaceConfigurationStore; |
|||
|
|||
public ContentModerator(IWorkspaceConfigurationStore workspaceConfigurationStore) |
|||
{ |
|||
_workspaceConfigurationStore = workspaceConfigurationStore; |
|||
} |
|||
|
|||
public async Task CheckAsync(string text) |
|||
{ |
|||
// Skip moderation for empty content |
|||
if (string.IsNullOrWhiteSpace(text)) |
|||
{ |
|||
return; |
|||
} |
|||
|
|||
// Retrieve the workspace configuration from AI Management Module |
|||
// This allows runtime configuration changes without redeployment |
|||
var config = await _workspaceConfigurationStore.GetOrNullAsync<OpenAIAssistantWorkspace>(); |
|||
|
|||
if(config == null) |
|||
{ |
|||
throw new UserFriendlyException("Could not find the 'OpenAIAssistant' workspace!"); |
|||
} |
|||
|
|||
var client = new ModerationClient( |
|||
model: config.Model, |
|||
apiKey: config.ApiKey |
|||
); |
|||
|
|||
// Send the text to OpenAI's Moderation API |
|||
var result = await client.ClassifyTextAsync(text); |
|||
var moderationResult = result.Value; |
|||
|
|||
// If the content is flagged, throw a user-friendly exception |
|||
if (moderationResult.Flagged) |
|||
{ |
|||
var flaggedCategories = GetFlaggedCategories(moderationResult); |
|||
|
|||
throw new UserFriendlyException( |
|||
$"Your comment contains content that violates our community guidelines. " + |
|||
$"Detected issues: {string.Join(", ", flaggedCategories)}. " + |
|||
$"Please revise your comment and try again." |
|||
); |
|||
} |
|||
} |
|||
|
|||
private static List<string> GetFlaggedCategories(ModerationResult result) |
|||
{ |
|||
var flaggedCategories = new List<string>(); |
|||
|
|||
if (result.Harassment.Flagged) |
|||
{ |
|||
flaggedCategories.Add("harassment"); |
|||
} |
|||
if (result.HarassmentThreatening.Flagged) |
|||
{ |
|||
flaggedCategories.Add("threatening harassment"); |
|||
} |
|||
|
|||
//other categories... |
|||
|
|||
return flaggedCategories; |
|||
} |
|||
} |
|||
``` |
|||
|
|||
> **Note**: The `ModerationResult` class from the OpenAI .NET SDK provides properties for each moderation category (e.g., `Harassment`, `Violence`, `Sexual`), each with a `Flagged` boolean and a `Score` float (0-1). The exact property names may vary slightly between SDK versions, so check the [OpenAI .NET SDK documentation](https://github.com/openai/openai-dotnet) for the latest API. |
|||
|
|||
### Integrating with CMS Kit Comments |
|||
|
|||
The final piece of the puzzle is integrating our moderation service with the CMS Kit's comment system. We'll override the `CommentPublicAppService` to intercept all comment creation and update requests: |
|||
|
|||
```csharp |
|||
using System; |
|||
using System.Threading.Tasks; |
|||
using ContentModeration.Moderation; |
|||
using Microsoft.Extensions.Options; |
|||
using Volo.Abp.DependencyInjection; |
|||
using Volo.Abp.EventBus.Distributed; |
|||
using Volo.CmsKit.Comments; |
|||
using Volo.CmsKit.Public.Comments; |
|||
using Volo.CmsKit.Users; |
|||
using Volo.Abp.SettingManagement; |
|||
|
|||
namespace ContentModeration.Comments; |
|||
|
|||
[Dependency(ReplaceServices = true)] |
|||
[ExposeServices(typeof(ICommentPublicAppService), typeof(CommentPublicAppService), typeof(MyCommentAppService))] |
|||
public class MyCommentAppService : CommentPublicAppService |
|||
{ |
|||
protected IContentModerator ContentModerator { get; } |
|||
|
|||
public MyCommentAppService( |
|||
ICommentRepository commentRepository, |
|||
ICmsUserLookupService cmsUserLookupService, |
|||
IDistributedEventBus distributedEventBus, |
|||
CommentManager commentManager, |
|||
IOptionsSnapshot<CmsKitCommentOptions> cmsCommentOptions, |
|||
ISettingManager settingManager, |
|||
IContentModerator contentModerator) |
|||
: base(commentRepository, cmsUserLookupService, distributedEventBus, commentManager, cmsCommentOptions, settingManager) |
|||
{ |
|||
ContentModerator = contentModerator; |
|||
} |
|||
|
|||
public override async Task<CommentDto> CreateAsync(string entityType, string entityId, CreateCommentInput input) |
|||
{ |
|||
// Check for harmful content BEFORE creating the comment |
|||
// If harmful content is detected, an exception is thrown and the comment is not saved |
|||
await ContentModerator.CheckAsync(input.Text); |
|||
|
|||
return await base.CreateAsync(entityType, entityId, input); |
|||
} |
|||
|
|||
public override async Task<CommentDto> UpdateAsync(Guid id, UpdateCommentInput input) |
|||
{ |
|||
// Check for harmful content BEFORE updating the comment |
|||
// This prevents users from editing approved comments to add harmful content |
|||
await ContentModerator.CheckAsync(input.Text); |
|||
|
|||
return await base.UpdateAsync(id, input); |
|||
} |
|||
} |
|||
``` |
|||
|
|||
**How This Works:** |
|||
|
|||
1. When a user submits a new comment, the `CreateAsync` method is called. |
|||
2. Before the comment is saved to the database, we call `ContentModerator.CheckAsync()` with the comment text. |
|||
3. The moderation service sends the text to OpenAI's Moderation API. |
|||
4. If the content is flagged as harmful, a `UserFriendlyException` is thrown with a descriptive message. |
|||
5. The exception is caught by ABP's exception handling middleware and displayed to the user as a friendly error message. |
|||
6. If the content passes moderation, the comment is saved normally. |
|||
|
|||
The same flow applies to comment updates, ensuring users can't circumvent moderation by editing previously approved comments. |
|||
|
|||
Here's the full flow in action — submitting a comment with harmful content and seeing the moderation kick in: |
|||
|
|||
 |
|||
|
|||
## The Power of Dynamic Configuration - What AI Management Module Provides to You? |
|||
|
|||
One of the most significant advantages of using the AI Management Module is the ability to manage your AI configurations dynamically. Let's explore what this means in practice. |
|||
|
|||
### Runtime Configuration Changes |
|||
|
|||
With the AI Management Module, you can: |
|||
|
|||
- **Rotate API Keys**: Update your OpenAI API key through the admin UI without any downtime or redeployment. This is crucial for security compliance and key rotation policies. |
|||
- **Switch Models**: Want to test a newer moderation model? Simply update the model name in the workspace configuration. Your application will immediately start using the new model. |
|||
- **Adjust Settings**: Fine-tune settings like temperature or system prompts (for chat-based workspaces) without touching your codebase. |
|||
- **Enable/Disable Workspaces**: Temporarily disable a workspace for maintenance or testing without affecting other parts of your application. |
|||
|
|||
### Multi-Environment Management |
|||
|
|||
The dynamic configuration approach shines in multi-environment scenarios: |
|||
|
|||
- **Development**: Use a test API key with lower rate limits |
|||
- **Staging**: Use a separate API key for integration testing |
|||
- **Production**: Use your production API key with appropriate security measures |
|||
|
|||
All these configurations can be managed through the UI or via data seeding, without environment-specific code changes. |
|||
|
|||
### Actively Maintained & What's Coming Next |
|||
|
|||
The AI Management Module is **actively maintained** and continuously evolving. The team is working on exciting new capabilities that will further expand what you can do with AI in your ABP applications: |
|||
|
|||
- **MCP (Model Context Protocol) Support** — Coming in **v10.2**, MCP support will allow your AI workspaces to interact with external tools and data sources, enabling more sophisticated AI-powered workflows. |
|||
- **RAG (Retrieval-Augmented Generation) System** — Also planned for **v10.2**, the built-in RAG system will let you ground AI responses in your own data, making AI features more accurate and context-aware. |
|||
- **And More** — Additional features and improvements are on the roadmap to make AI integration even more seamless. |
|||
|
|||
Since the module is built on ABP's modular architecture, adopting these new capabilities will be straightforward — you can simply update the module and start using the new features without rewriting your existing AI integrations. |
|||
|
|||
### Permission-Based Access Control |
|||
|
|||
The AI Management Module integrates with ABP's permission system, allowing you to: |
|||
|
|||
- Restrict who can view AI workspace configurations |
|||
- Control who can create or modify workspaces |
|||
- Limit access to specific workspaces based on user roles |
|||
|
|||
This ensures that sensitive configurations like API keys are only accessible to authorized administrators. |
|||
|
|||
## Conclusion |
|||
|
|||
In this comprehensive guide, we've built a production-ready content moderation system that combines the power of OpenAI's `omni-moderation-latest` model with the flexibility of ABP's AI Management Module. Let's recap what makes this approach powerful: |
|||
|
|||
### Key Takeaways |
|||
|
|||
1. **Zero Training Required**: Unlike traditional ML approaches that require collecting datasets, training models, and ongoing maintenance, OpenAI's Moderation API works out of the box with state-of-the-art accuracy. |
|||
2. **Completely Free**: OpenAI's Moderation API has no token costs, making it economically viable for applications of any scale. |
|||
3. **Comprehensive Detection**: With 13+ categories of harmful content detection, you get protection against harassment, hate speech, violence, sexual content, self-harm, and more—all from a single API call. |
|||
4. **Dynamic Configuration**: The AI Management Module allows you to manage API keys, switch providers, and adjust settings at runtime without code changes or redeployment. |
|||
5. **Clean Integration**: By following ABP's service override pattern, we integrated moderation seamlessly into the existing CMS Kit comment system without modifying the original module. |
|||
6. **Production Ready**: The implementation includes proper error handling, graceful degradation, and user-friendly error messages suitable for production use. |
|||
|
|||
### Resources |
|||
|
|||
- [AI Management Module Documentation](https://abp.io/docs/latest/modules/ai-management) |
|||
- [OpenAI Moderation Guide](https://platform.openai.com/docs/guides/moderation) |
|||
- [CMS Kit Comments Feature](https://abp.io/docs/latest/modules/cms-kit/comments) |
|||
- [ABP Framework AI Infrastructure](https://abp.io/docs/latest/framework/infrastructure/artificial-intelligence) |
|||
@ -0,0 +1,133 @@ |
|||
```json |
|||
//[doc-seo] |
|||
{ |
|||
"Description": "Learn how to connect AI tools like Cursor, Claude Desktop, and VS Code to ABP Studio using the Model Context Protocol (MCP)." |
|||
} |
|||
``` |
|||
|
|||
# ABP Studio: Model Context Protocol (MCP) |
|||
|
|||
````json |
|||
//[doc-nav] |
|||
{ |
|||
"Next": { |
|||
"Name": "Working with Kubernetes", |
|||
"Path": "studio/kubernetes" |
|||
} |
|||
} |
|||
```` |
|||
|
|||
ABP Studio includes built-in [Model Context Protocol (MCP)](https://modelcontextprotocol.io/) support so AI tools can query runtime telemetry and control solution runner operations. |
|||
|
|||
## How It Works |
|||
|
|||
ABP Studio runs a local MCP server in the background. The `abp mcp-studio` CLI command acts as a stdio bridge that AI clients connect to. The bridge forwards requests to ABP Studio and returns responses. |
|||
|
|||
```text |
|||
MCP Client (Cursor / Claude Desktop / VS Code) |
|||
──stdio──▶ abp mcp-studio ──HTTP──▶ ABP Studio |
|||
``` |
|||
|
|||
> ABP Studio must be running while MCP is used. If ABP Studio is not running (or its MCP endpoint is unavailable), `abp mcp-studio` returns an error to the AI client. |
|||
|
|||
## Configuration |
|||
|
|||
### Cursor (`.cursor/mcp.json`) |
|||
|
|||
```json |
|||
{ |
|||
"mcpServers": { |
|||
"abp-studio": { |
|||
"command": "abp", |
|||
"args": ["mcp-studio"] |
|||
} |
|||
} |
|||
} |
|||
``` |
|||
|
|||
### Claude Desktop (`claude_desktop_config.json`) |
|||
|
|||
```json |
|||
{ |
|||
"mcpServers": { |
|||
"abp-studio": { |
|||
"command": "abp", |
|||
"args": ["mcp-studio"] |
|||
} |
|||
} |
|||
} |
|||
``` |
|||
|
|||
Claude Desktop config file locations: |
|||
|
|||
- macOS: `~/Library/Application Support/Claude/claude_desktop_config.json` |
|||
- Windows: `%APPDATA%\Claude\claude_desktop_config.json` |
|||
- Linux: `~/.config/Claude/claude_desktop_config.json` |
|||
|
|||
### VS Code (`.vscode/mcp.json`) |
|||
|
|||
```json |
|||
{ |
|||
"servers": { |
|||
"abp-studio": { |
|||
"command": "abp", |
|||
"args": ["mcp-studio"] |
|||
} |
|||
} |
|||
} |
|||
``` |
|||
|
|||
### Quick Reference |
|||
|
|||
You can run `abp help mcp-studio` at any time to see the available options and example configuration snippets for each supported IDE directly in your terminal. |
|||
|
|||
### Generating Config Files from ABP Studio |
|||
|
|||
When creating a new solution, ABP Studio can generate MCP configuration files for Cursor and VS Code automatically. |
|||
|
|||
## Available Tools |
|||
|
|||
ABP Studio exposes the following tools to MCP clients. All tools operate on the currently open solution and selected run profile in ABP Studio. |
|||
|
|||
### Monitoring |
|||
|
|||
| Tool | Description | |
|||
|------|-------------| |
|||
| `list_applications` | Lists all running ABP applications connected to ABP Studio. | |
|||
| `get_exceptions` | Gets recent exceptions including stack traces and error messages. | |
|||
| `get_logs` | Gets log entries. Can be filtered by log level. | |
|||
| `get_requests` | Gets HTTP request information. Can be filtered by status code. | |
|||
| `get_events` | Gets distributed events for debugging inter-service communication. | |
|||
| `clear_monitor` | Clears collected monitor data. | |
|||
|
|||
### Application Control |
|||
|
|||
| Tool | Description | |
|||
|------|-------------| |
|||
| `list_runnable_applications` | Lists all applications in the current run profile with their state. | |
|||
| `start_application` | Starts a stopped application. | |
|||
| `stop_application` | Stops a running application. | |
|||
| `restart_application` | Restarts a running application. | |
|||
| `build_application` | Builds a .NET application using `dotnet build`. | |
|||
|
|||
### Container Control |
|||
|
|||
| Tool | Description | |
|||
|------|-------------| |
|||
| `list_containers` | Lists Docker containers in the current run profile with their state. | |
|||
| `start_containers` | Starts Docker containers (docker-compose up). | |
|||
| `stop_containers` | Stops Docker containers (docker-compose down). | |
|||
|
|||
### Solution Structure |
|||
|
|||
| Tool | Description | |
|||
|------|-------------| |
|||
| `get_solution_info` | Gets solution name, path, template, and run profile information. | |
|||
| `list_modules` | Lists all modules in the solution. | |
|||
| `list_packages` | Lists packages (projects) in the solution. Can be filtered by module. | |
|||
| `get_module_dependencies` | Gets module dependency/import information. | |
|||
|
|||
## Notes |
|||
|
|||
- Monitor data (exceptions, logs, requests, events) is kept in memory and is cleared when the solution is closed. |
|||
- The `abp mcp-studio` command connects to the local ABP Studio instance. This is separate from the `abp mcp` command, which connects to the ABP.IO cloud MCP service and requires an active license. |
|||
@ -1,9 +1,9 @@ |
|||
{ |
|||
"culture": "de", |
|||
"texts": { |
|||
"TenantNotFoundMessage": "Mieter nicht gefunden!", |
|||
"TenantNotFoundMessage": "Mandant nicht gefunden!", |
|||
"TenantNotFoundDetails": "Es gibt keinen Mandanten mit der Mandanten-ID oder dem Namen: {0}", |
|||
"TenantNotActiveMessage": "Mieter ist nicht aktiv!", |
|||
"TenantNotActiveMessage": "Mandant ist nicht aktiv!", |
|||
"TenantNotActiveDetails": "Der Mandant ist mit der Mandanten-ID oder dem Namen nicht aktiv: {0}" |
|||
} |
|||
} |
|||
@ -0,0 +1,47 @@ |
|||
using System; |
|||
using System.Text; |
|||
using System.Text.Json; |
|||
using JetBrains.Annotations; |
|||
using Swashbuckle.AspNetCore.SwaggerUI; |
|||
using Volo.Abp; |
|||
|
|||
namespace Microsoft.Extensions.DependencyInjection; |
|||
|
|||
public static class AbpSwaggerUIOptionsExtensions
{
    /// <summary>
    /// Sets the abp.appPath used by the Swagger UI scripts.
    /// </summary>
    /// <param name="options">The Swagger UI options.</param>
    /// <param name="appPath">The application base path.</param>
    public static void AbpAppPath([NotNull] this SwaggerUIOptions options, [NotNull] string appPath)
    {
        Check.NotNull(options, nameof(options));
        Check.NotNull(appPath, nameof(appPath));

        // Append the abp.appPath script to whatever head content is already configured.
        options.HeadContent = BuildAppPathScript(NormalizeAppPath(appPath), options.HeadContent ?? string.Empty);
    }

    // Blank input falls back to "/"; otherwise the path is trimmed and wrapped in slashes.
    private static string NormalizeAppPath(string appPath)
    {
        if (string.IsNullOrWhiteSpace(appPath))
        {
            return "/";
        }

        return appPath.Trim().EnsureStartsWith('/').EnsureEndsWith('/');
    }

    // Builds the final head content: existing content (if any) followed by a
    // script block that defines abp.appPath. JsonSerializer.Serialize produces
    // a safely quoted/escaped JavaScript string literal.
    private static string BuildAppPathScript(string normalizedAppPath, string headContent)
    {
        var scriptBuilder = new StringBuilder(headContent);

        if (scriptBuilder.Length > 0)
        {
            scriptBuilder.AppendLine();
        }

        scriptBuilder.AppendLine("<script>");
        scriptBuilder.AppendLine("    var abp = abp || {};");
        scriptBuilder.AppendLine($"    abp.appPath = {JsonSerializer.Serialize(normalizedAppPath)};");
        scriptBuilder.AppendLine("</script>");

        return scriptBuilder.ToString();
    }
}
|||
@ -0,0 +1,17 @@ |
|||
<Project Sdk="Microsoft.NET.Sdk"> |
|||
|
|||
<Import Project="..\..\..\common.test.props" /> |
|||
|
|||
<PropertyGroup> |
|||
<TargetFramework>net10.0</TargetFramework> |
|||
<RootNamespace /> |
|||
</PropertyGroup> |
|||
|
|||
<ItemGroup> |
|||
<ProjectReference Include="..\..\src\Volo.Abp.AspNetCore.Mvc.Client\Volo.Abp.AspNetCore.Mvc.Client.csproj" /> |
|||
<ProjectReference Include="..\..\src\Volo.Abp.Autofac\Volo.Abp.Autofac.csproj" /> |
|||
<ProjectReference Include="..\AbpTestBase\AbpTestBase.csproj" /> |
|||
<PackageReference Include="Microsoft.NET.Test.Sdk" /> |
|||
</ItemGroup> |
|||
|
|||
</Project> |
|||
@ -0,0 +1,11 @@ |
|||
using Volo.Abp.Testing; |
|||
|
|||
namespace Volo.Abp.AspNetCore.Mvc.Client; |
|||
|
|||
/// <summary>
/// Base class for ASP.NET Core MVC client integration tests.
/// Boots the test application with <see cref="AbpAspNetCoreMvcClientTestModule"/>.
/// </summary>
public abstract class AbpAspNetCoreMvcClientTestBase : AbpIntegratedTest<AbpAspNetCoreMvcClientTestModule>
{
    protected override void SetAbpApplicationCreationOptions(AbpApplicationCreationOptions options)
    {
        // Use Autofac as the DI container for the test application.
        options.UseAutofac();
    }
}
|||
@ -0,0 +1,17 @@ |
|||
using Microsoft.Extensions.DependencyInjection; |
|||
using Volo.Abp.Autofac; |
|||
using Volo.Abp.Modularity; |
|||
|
|||
namespace Volo.Abp.AspNetCore.Mvc.Client; |
|||
|
|||
/// <summary>
/// Test module for the MVC client integration tests; depends on the module
/// under test plus Autofac integration.
/// </summary>
[DependsOn(
    typeof(AbpAspNetCoreMvcClientModule),
    typeof(AbpAutofacModule)
)]
public class AbpAspNetCoreMvcClientTestModule : AbpModule
{
    public override void ConfigureServices(ServiceConfigurationContext context)
    {
        // Register IHttpContextAccessor, which the client services resolve at runtime.
        context.Services.AddHttpContextAccessor();
    }
}
|||
@ -0,0 +1,105 @@ |
|||
using System.Collections.Generic; |
|||
using System.Threading.Tasks; |
|||
using Microsoft.Extensions.DependencyInjection; |
|||
using Microsoft.Extensions.DependencyInjection.Extensions; |
|||
using NSubstitute; |
|||
using Shouldly; |
|||
using Volo.Abp.AspNetCore.Mvc.ApplicationConfigurations; |
|||
using Volo.Abp.AspNetCore.Mvc.ApplicationConfigurations.ClientProxies; |
|||
using Volo.Abp.Localization; |
|||
using Xunit; |
|||
|
|||
namespace Volo.Abp.AspNetCore.Mvc.Client; |
|||
|
|||
/// <summary>
/// Tests for the cached application configuration client, verifying how the
/// configuration and localization proxies are called (culture, concurrency,
/// and re-fetch behavior).
/// </summary>
public class MvcCachedApplicationConfigurationClient_Tests : AbpAspNetCoreMvcClientTestBase
{
    // NSubstitute mocks that replace the real client proxies; assigned in
    // AfterAddApplication before the service under test is resolved.
    private AbpApplicationConfigurationClientProxy _configProxy;
    private AbpApplicationLocalizationClientProxy _localizationProxy;

    // System under test, resolved from the fully built test application.
    private readonly ICachedApplicationConfigurationClient _applicationConfigurationClient;

    public MvcCachedApplicationConfigurationClient_Tests()
    {
        _applicationConfigurationClient = GetRequiredService<ICachedApplicationConfigurationClient>();
    }

    protected override void AfterAddApplication(IServiceCollection services)
    {
        _configProxy = Substitute.For<AbpApplicationConfigurationClientProxy>();
        _localizationProxy = Substitute.For<AbpApplicationLocalizationClientProxy>();

        // Replace the registered proxies so the client under test talks to the mocks.
        services.Replace(ServiceDescriptor.Transient(_ => _configProxy));
        services.Replace(ServiceDescriptor.Transient(_ => _localizationProxy));
    }

    [Fact]
    public async Task Should_Use_CurrentUICulture_For_Localization_Request()
    {
        var cultureName = "en";

        using (CultureHelper.Use(cultureName))
        {
            // Keep the configuration call pending so we can observe ordering:
            // the localization request must be issued while config is still in flight.
            var configTcs = new TaskCompletionSource<ApplicationConfigurationDto>();
            _configProxy.GetAsync(Arg.Any<ApplicationConfigurationRequestOptions>()).Returns(configTcs.Task);

            var expectedResources = new Dictionary<string, ApplicationLocalizationResourceDto>
            {
                ["TestResource"] = new()
            };

            _localizationProxy.GetAsync(Arg.Any<ApplicationLocalizationRequestDto>()).Returns(new ApplicationLocalizationDto { Resources = expectedResources });

            var resultTask = _applicationConfigurationClient.GetAsync();

            // Localization request should be fired before config completes (concurrent),
            // using the ambient UI culture set via CultureHelper.Use above.
            await _localizationProxy.Received(1).GetAsync(Arg.Is<ApplicationLocalizationRequestDto>(x => x.CultureName == cultureName && x.OnlyDynamics == true));

            // Now let config complete.
            configTcs.SetResult(CreateConfigDto(cultureName));
            var result = await resultTask;

            // The localization resources fetched concurrently end up in the combined result.
            result.Localization.Resources.ShouldBe(expectedResources);

            // Config is requested without inline localization resources (fetched separately).
            await _configProxy.Received(1).GetAsync(Arg.Is<ApplicationConfigurationRequestOptions>(x => x.IncludeLocalizationResources == false));
        }
    }

    [Fact]
    public async Task Should_Refetch_Localization_When_Culture_Differs()
    {
        // Client-side culture differs from the culture the server reports back.
        var currentCulture = "en";
        var serverCulture = "tr";

        using (CultureHelper.Use(currentCulture))
        {
            _configProxy.GetAsync(Arg.Any<ApplicationConfigurationRequestOptions>()).Returns(CreateConfigDto(serverCulture));

            // "wrong" = the optimistic fetch for the local culture; "correct" = the
            // re-fetch for the culture the server actually resolved.
            var wrongResources = new Dictionary<string, ApplicationLocalizationResourceDto>();
            var correctResources = new Dictionary<string, ApplicationLocalizationResourceDto>
            {
                ["TestResource"] = new()
            };

            _localizationProxy.GetAsync(Arg.Is<ApplicationLocalizationRequestDto>(x => x.CultureName == currentCulture)).Returns(new ApplicationLocalizationDto { Resources = wrongResources });
            _localizationProxy.GetAsync(Arg.Is<ApplicationLocalizationRequestDto>(x => x.CultureName == serverCulture)).Returns(new ApplicationLocalizationDto { Resources = correctResources });

            var result = await _applicationConfigurationClient.GetAsync();

            // The result must carry the server-culture resources, not the optimistic ones.
            result.Localization.Resources.ShouldBe(correctResources);

            // Exactly one request per culture: the optimistic fetch plus the corrective re-fetch.
            await _localizationProxy.Received(1).GetAsync(Arg.Is<ApplicationLocalizationRequestDto>(x => x.CultureName == currentCulture));
            await _localizationProxy.Received(1).GetAsync(Arg.Is<ApplicationLocalizationRequestDto>(x => x.CultureName == serverCulture));
        }
    }

    // Builds a minimal configuration DTO whose current culture is cultureName.
    // NOTE(review): the nested initializer assumes Localization is pre-initialized
    // (non-null) by ApplicationConfigurationDto — confirm against the DTO definition.
    private static ApplicationConfigurationDto CreateConfigDto(string cultureName)
    {
        return new ApplicationConfigurationDto
        {
            Localization =
            {
                CurrentCulture = new CurrentCultureDto { Name = cultureName }
            }
        };
    }
}
|||