As a developer, understanding how the factory works is the difference between sending a broken, half-baked request to an LLM and sending a perfectly tuned instruction.
Without the factory, your templates are just text. The factory provides the RenderAsync method, which is arguably the most powerful tool in your debugging arsenal. It allows you to see exactly what the AI will see before you spend a single cent on token costs.
// NOTE(review): this excerpt mirrors the complete program later in the file;
// it assumes a configured Kernel instance named 'kernel' is already in scope.
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.Connectors.OpenAI;
// 1. Define the blueprint: the template text, its format, and the input
//    variables it expects, bundled into a PromptTemplateConfig.
var threatReportConfig = new PromptTemplateConfig
{
Name = "ThreatSummary",
// Raw string literal holding the template. {{ $name }} marks an input
// variable; {{ datetime.now }} invokes a kernel plugin function at render time.
Template = """
<system>You are a Senior Security Analyst.</system>
Report Generated: {{ datetime.now }}
ANALYSIS REQUEST:
Identify risks in the following log entry: "{{ $log_entry }}"
Severity Level Requested: {{ $severity }}
Respond in {{ $language }} only.
""",
TemplateFormat = "semantic-kernel",
// Declared inputs: log_entry is mandatory, the others fall back to defaults
// when the caller omits them.
InputVariables = [
new() { Name = "log_entry", IsRequired = true },
new() { Name = "severity", Default = "High" },
new() { Name = "language", Default = "English" }
]
};
// 2. Instantiate the Factory (the engine that turns configs into templates)
var factory = new KernelPromptTemplateFactory();
// 3. Create the Template object from the Config
var template = factory.Create(threatReportConfig);
// 4. Prepare the Arguments (The data to fill the placeholders)
var arguments = new KernelArguments
{
["log_entry"] = "Unauthorized login attempt from IP 192.168.1.105 at 03:00 AM",
["severity"] = "Critical",
["language"] = "English"
};
// 5. RENDER the prompt (Debugging Step)
// This shows exactly what is sent to the LLM.
// 'kernel' must be a built Kernel with a "datetime" plugin registered,
// otherwise the {{ datetime.now }} call cannot be resolved.
string renderedPrompt = await template.RenderAsync(kernel, arguments);
Console.WriteLine("--- RENDERED PROMPT START ---");
Console.WriteLine(renderedPrompt);
Console.WriteLine("--- RENDERED PROMPT END ---");
Rendering is the process of "baking" the data into the template. In the code above, RenderAsync substitutes each {{ $variable }} placeholder with its argument value (or its declared default), and evaluates template function calls such as {{ datetime.now }} against the kernel's plugins, producing the final prompt text. The complete, runnable program is shown below:
// See https://aka.ms/new-console-template for more information
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.Connectors.OpenAI;

// ---------------------------------------------------------------------------
// Demo: KernelPromptTemplateFactory for cybersecurity threat analysis.
// Shows the blueprint (PromptTemplateConfig) -> factory -> template pipeline,
// RenderAsync as a pre-flight debugging step, and invocation of the prompt
// as a KernelFunction so its ExecutionSettings are honored.
// ---------------------------------------------------------------------------

// Read the API key from the environment; secrets never belong in source.
var apiKey = Environment.GetEnvironmentVariable("OPEN_AI_KEY");
if (string.IsNullOrEmpty(apiKey))
{
    Console.WriteLine("Please set the OPEN_AI_KEY environment variable.");
    return;
}

var kernel = Kernel.CreateBuilder()
    .AddOpenAIChatCompletion(
        "gpt-4o",
        apiKey)
    .Build();

// Register a "datetime" plugin so the template's {{ datetime.now }} call resolves.
kernel.ImportPluginFromFunctions("datetime",
[
    KernelFunctionFactory.CreateFromMethod(
        () => DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss"),
        "now",
        "Gets current date and time")
]);

Console.WriteLine("=== KernelPromptTemplateFactory Demo: Cybersecurity Threat Analysis ===\n");

// 1. Define the blueprint (PromptTemplateConfig)
var threatReportConfig = new PromptTemplateConfig
{
    Name = "ThreatSummary",
    Description = "Analyzes security logs and identifies potential threats",
    Template = """
        <system>You are a Senior Security Analyst with expertise in threat detection and risk assessment.</system>
        Report Generated: {{ datetime.now }}
        ANALYSIS REQUEST:
        Identify risks in the following log entry: "{{ $log_entry }}"
        Severity Level Requested: {{ $severity }}
        Respond in {{ $language }} only.
        Provide:
        1. Threat Type
        2. Risk Level (1-10)
        3. Recommended Actions
        4. Potential Impact
        """,
    TemplateFormat = "semantic-kernel",
    InputVariables =
    [
        new() { Name = "log_entry", Description = "The security log entry to analyze", IsRequired = true },
        new() { Name = "severity", Description = "Severity filter level", Default = "High" },
        new() { Name = "language", Description = "Response language", Default = "English" }
    ],
    // These settings only take effect when the prompt is invoked as a
    // KernelFunction (see threatFunction below) — not when raw rendered
    // text is sent through InvokePromptAsync.
    ExecutionSettings = new Dictionary<string, PromptExecutionSettings>
    {
        ["default"] = new OpenAIPromptExecutionSettings
        {
            MaxTokens = 800,
            Temperature = 0.3 // Low temperature for precise security analysis
        }
    }
};

// 2. Instantiate the Factory (The engine that assembles templates)
var factory = new KernelPromptTemplateFactory();

// 3. Create the Template object from the Config (render-only, for debugging)
var template = factory.Create(threatReportConfig);

// 3b. Build an invocable function from the SAME config.
// FIX: the original code re-submitted the already-rendered text through
// kernel.InvokePromptAsync, which parses that text as a template a SECOND
// time — any '{{' sequence inside untrusted log data would be interpreted
// as template syntax (template injection) — and silently dropped the
// ExecutionSettings above. Invoking the function renders exactly once and
// applies MaxTokens/Temperature.
var threatFunction = kernel.CreateFunctionFromPrompt(threatReportConfig);

// Runs one demo scenario: renders the prompt for inspection (the debugging
// step), then performs the real AI call via the prompt function.
//   title      - scenario banner text
//   args       - arguments filling the template placeholders
//   renderNote - annotation printed with the rendered-prompt preview
//   resultNote - optional annotation appended to the result banner
async Task RunScenarioAsync(string title, KernelArguments args, string renderNote, string resultNote = "")
{
    Console.WriteLine($"=== {title} ===\n");

    // Debugging step: preview exactly what the model will receive,
    // before spending any tokens.
    Console.WriteLine($"--- RENDERED PROMPT ({renderNote}) ---");
    Console.WriteLine(await template.RenderAsync(kernel, args));
    Console.WriteLine("--- END RENDERED PROMPT ---\n");

    // Execute the actual AI call (single render, settings applied).
    Console.WriteLine($"--- AI ANALYSIS RESULT{resultNote} ---");
    Console.WriteLine(await kernel.InvokeAsync(threatFunction, args));
    Console.WriteLine("\n" + new string('=', 80) + "\n");
}

await RunScenarioAsync(
    "Scenario 1: Critical Unauthorized Access",
    new KernelArguments
    {
        ["log_entry"] = "Unauthorized login attempt from IP 192.168.1.105 at 03:00 AM. Multiple failed attempts detected. User: admin",
        ["severity"] = "Critical",
        ["language"] = "English"
    },
    "Before sending to AI");

await RunScenarioAsync(
    "Scenario 2: Suspicious Data Transfer",
    new KernelArguments
    {
        ["log_entry"] = "Large data transfer detected: 5GB uploaded to external server 203.0.113.45 at 02:30 AM. User: john.doe@company.com",
        ["severity"] = "High",
        ["language"] = "English"
    },
    "Before sending to AI");

// Only the required field is supplied; severity/language fall back to the
// defaults declared in InputVariables.
await RunScenarioAsync(
    "Scenario 3: Using Default Values",
    new KernelArguments
    {
        ["log_entry"] = "Port scan detected from IP 10.0.0.50 targeting ports 22, 80, 443, 3389"
    },
    "Using Defaults: severity=High, language=English");

await RunScenarioAsync(
    "Scenario 4: Multi-language Support",
    new KernelArguments
    {
        ["log_entry"] = "SQL Injection attempt detected in web form. Query: ' OR '1'='1",
        ["severity"] = "Critical",
        ["language"] = "Spanish"
    },
    "Spanish Response",
    " (in Spanish)");

// Demonstrate the power of RenderAsync for debugging
Console.WriteLine("=== Why RenderAsync Matters: Error Detection ===\n");
try
{
    // "log_entry" is marked IsRequired but is deliberately omitted here.
    var invalidArguments = new KernelArguments
    {
        ["severity"] = "High"
    };
    Console.WriteLine("Attempting to render with missing required field...");
    var invalidRender = await template.RenderAsync(kernel, invalidArguments);
    // NOTE(review): RenderAsync does not appear to enforce IsRequired in this
    // version — the placeholder simply renders empty. Inspecting the rendered
    // output here is how you catch that BEFORE calling the API.
    Console.WriteLine("Render succeeded (no validation in this version)");
    Console.WriteLine(invalidRender);
}
catch (Exception ex)
{
    Console.WriteLine($"ERROR CAUGHT: {ex.Message}");
    Console.WriteLine("This is why RenderAsync is valuable - you catch errors BEFORE calling the expensive AI API!");
}

Console.WriteLine("\n=== Demo Complete ===");
Console.WriteLine("\nKey Takeaways:");
Console.WriteLine("1. PromptTemplateConfig = Blueprint");
Console.WriteLine("2. KernelPromptTemplateFactory = Engine that builds the template");
Console.WriteLine("3. RenderAsync = Preview what AI will see (ALWAYS use in development!)");
Console.WriteLine("4. Catch errors early, save API costs, ensure correctness");