diff --git a/.github/instructions/update_version.instructions.md b/.github/instructions/update_version.instructions.md
index db322826..6df1876e 100644
--- a/.github/instructions/update_version.instructions.md
+++ b/.github/instructions/update_version.instructions.md
@@ -1,7 +1,8 @@
---
applyTo: '**'
---
-After a code change, update the version
+After a code change, update the version.
+If a change only affects files under /docs, do not increment the version.
Example
Before Code Changes
diff --git a/.github/workflows/release-notes-check.yml b/.github/workflows/release-notes-check.yml
index 9a9f0d1f..4c88702f 100644
--- a/.github/workflows/release-notes-check.yml
+++ b/.github/workflows/release-notes-check.yml
@@ -25,15 +25,10 @@ jobs:
uses: tj-actions/changed-files@v46.0.1
with:
files_yaml: |
- code:
- - 'application/single_app/**/*.py'
- - 'application/single_app/**/*.js'
- - 'application/single_app/**/*.html'
- - 'application/single_app/**/*.css'
+ application:
+ - 'application/**'
release_notes:
- 'docs/explanation/release_notes.md'
- config:
- - 'application/single_app/config.py'
- name: Check for feature/fix keywords in PR
id: check-keywords
@@ -66,8 +61,7 @@ jobs:
- name: Determine if release notes update is required
id: require-notes
env:
- CODE_CHANGED: ${{ steps.changed-files.outputs.code_any_changed }}
- CONFIG_CHANGED: ${{ steps.changed-files.outputs.config_any_changed }}
+ APPLICATION_CHANGED: ${{ steps.changed-files.outputs.application_any_changed }}
RELEASE_NOTES_CHANGED: ${{ steps.changed-files.outputs.release_notes_any_changed }}
HAS_FEATURE: ${{ steps.check-keywords.outputs.has_feature }}
HAS_FIX: ${{ steps.check-keywords.outputs.has_fix }}
@@ -76,8 +70,7 @@ jobs:
echo "================================"
          echo "🔍 PR Analysis Summary"
echo "================================"
- echo "Code files changed: $CODE_CHANGED"
- echo "Config changed: $CONFIG_CHANGED"
+ echo "Application files changed: $APPLICATION_CHANGED"
echo "Release notes updated: $RELEASE_NOTES_CHANGED"
echo "Feature keywords found: $HAS_FEATURE"
echo "Fix keywords found: $HAS_FIX"
@@ -88,15 +81,14 @@ jobs:
needs_notes="false"
reason=""
- if [[ "$HAS_FEATURE" == "true" ]]; then
- needs_notes="true"
- reason="Feature-related keywords detected in PR title/body"
- elif [[ "$HAS_FIX" == "true" ]]; then
- needs_notes="true"
- reason="Fix-related keywords detected in PR title/body"
- elif [[ "$CODE_CHANGED" == "true" && "$CONFIG_CHANGED" == "true" ]]; then
- needs_notes="true"
- reason="Both code and config.py were modified"
+ if [[ "$APPLICATION_CHANGED" == "true" ]]; then
+ if [[ "$HAS_FEATURE" == "true" ]]; then
+ needs_notes="true"
+ reason="Feature-related keywords detected and files under application/ changed"
+ elif [[ "$HAS_FIX" == "true" ]]; then
+ needs_notes="true"
+ reason="Fix-related keywords detected and files under application/ changed"
+ fi
fi
echo "needs_notes=$needs_notes" >> $GITHUB_OUTPUT
@@ -104,11 +96,11 @@ jobs:
- name: Validate release notes update
env:
- CODE_CHANGED: ${{ steps.changed-files.outputs.code_any_changed }}
+ APPLICATION_CHANGED: ${{ steps.changed-files.outputs.application_any_changed }}
RELEASE_NOTES_CHANGED: ${{ steps.changed-files.outputs.release_notes_any_changed }}
NEEDS_NOTES: ${{ steps.require-notes.outputs.needs_notes }}
REASON: ${{ steps.require-notes.outputs.reason }}
- CODE_FILES: ${{ steps.changed-files.outputs.code_all_changed_files }}
+ APPLICATION_FILES: ${{ steps.changed-files.outputs.application_all_changed_files }}
run: |
echo ""
@@ -122,8 +114,8 @@ jobs:
echo "This PR appears to contain changes that should be documented"
echo "in the release notes (docs/explanation/release_notes.md)."
echo ""
    - echo "📄 Code files changed:"
- echo "$CODE_FILES" | tr ' ' '\n' | sed 's/^/ - /'
    + echo "📄 Application files changed:"
+ echo "$APPLICATION_FILES" | tr ' ' '\n' | sed 's/^/ - /'
echo ""
          echo "💡 Please consider adding an entry to release_notes.md describing:"
          echo " • New features added"
@@ -138,8 +130,8 @@ jobs:
exit 0
elif [[ "$RELEASE_NOTES_CHANGED" == "true" ]]; then
        echo "✅ Release notes have been updated - great job!"
- elif [[ "$CODE_CHANGED" != "true" ]]; then
    - echo "ℹ️ No significant code changes detected - release notes update not required."
+ elif [[ "$APPLICATION_CHANGED" != "true" ]]; then
    + echo "ℹ️ No files under application/ changed - release notes update not required."
else
        echo "ℹ️ Changes appear to be minor - release notes update optional."
fi
diff --git a/application/external_apps/databaseseeder/artifacts/admin_settings.json b/application/external_apps/databaseseeder/artifacts/admin_settings.json
index 897285cd..24c1c7ea 100644
--- a/application/external_apps/databaseseeder/artifacts/admin_settings.json
+++ b/application/external_apps/databaseseeder/artifacts/admin_settings.json
@@ -119,14 +119,17 @@
"video_indexer_endpoint": "https://api.videoindexer.ai",
"video_indexer_location": "",
"video_indexer_account_id": "",
- "video_indexer_api_key": "",
"video_indexer_resource_group": "",
"video_indexer_subscription_id": "",
"video_indexer_account_name": "",
- "video_indexer_arm_api_version": "2021-11-10-preview",
+ "video_indexer_arm_api_version": "2025-04-01",
"video_index_timeout": 600,
"speech_service_endpoint": "https://eastus.api.cognitive.microsoft.com",
"speech_service_location": "eastus",
+ "speech_service_subscription_id": "",
+ "speech_service_resource_group": "",
+ "speech_service_resource_name": "",
+ "speech_service_resource_id": "",
"speech_service_locale": "en-US",
"speech_service_key": "",
"classification_banner_enabled": true,
diff --git a/application/single_app/config.py b/application/single_app/config.py
index 0f46400c..7196cfe8 100644
--- a/application/single_app/config.py
+++ b/application/single_app/config.py
@@ -94,7 +94,7 @@
EXECUTOR_TYPE = 'thread'
EXECUTOR_MAX_WORKERS = 30
SESSION_TYPE = 'filesystem'
-VERSION = "0.241.004"
+VERSION = "0.241.006"
SECRET_KEY = os.getenv('SECRET_KEY', 'dev-secret-key-change-in-production')
diff --git a/application/single_app/foundry_agent_runtime.py b/application/single_app/foundry_agent_runtime.py
index 4de7f35a..0a88fb46 100644
--- a/application/single_app/foundry_agent_runtime.py
+++ b/application/single_app/foundry_agent_runtime.py
@@ -64,6 +64,14 @@ class FoundryAgentInvocationError(RuntimeError):
"""Raised when the Foundry agent invocation cannot be completed."""
+def _normalize_max_completion_tokens(value: Any) -> Optional[int]:
+ try:
+ normalized = int(value)
+ except (TypeError, ValueError):
+ return None
+ return normalized if normalized > 0 else None
+
+
class AzureAIFoundryChatCompletionAgent:
"""Lightweight wrapper so Foundry agents behave like SK chat agents."""
@@ -107,6 +115,7 @@ def invoke(
global_settings=self._global_settings,
message_history=history,
metadata=metadata,
+ max_completion_tokens=self.max_completion_tokens,
)
)
except RuntimeError:
@@ -145,6 +154,7 @@ async def invoke_stream(
global_settings=self._global_settings,
message_history=list(messages),
metadata={},
+ max_completion_tokens=self.max_completion_tokens,
)
self.last_run_citations = result.citations
self.last_run_model = result.model
@@ -194,6 +204,7 @@ def invoke(
global_settings=self._global_settings,
message_history=history,
metadata=metadata,
+ max_completion_tokens=self.max_completion_tokens,
)
)
self.last_run_citations = result.citations
@@ -211,6 +222,7 @@ async def invoke_stream(
global_settings=self._global_settings,
message_history=list(messages),
metadata={},
+ max_completion_tokens=self.max_completion_tokens,
):
if stream_message.metadata:
citations = stream_message.metadata.get("citations")
@@ -228,6 +240,7 @@ async def execute_foundry_agent(
global_settings: Dict[str, Any],
message_history: List[ChatMessageContent],
metadata: Dict[str, Any],
+ max_completion_tokens: Optional[int] = None,
) -> FoundryAgentInvocationResult:
"""Invoke a Foundry agent using Semantic Kernel's AzureAIAgent abstraction."""
@@ -248,15 +261,20 @@ async def execute_foundry_agent(
endpoint=endpoint,
api_version=api_version,
)
+ resolved_max_completion_tokens = _normalize_max_completion_tokens(max_completion_tokens)
try:
definition = await client.agents.get_agent(agent_id)
azure_agent = AzureAIAgent(client=client, definition=definition)
responses = []
- async for response in azure_agent.invoke(
- messages=message_history,
- metadata={k: str(v) for k, v in metadata.items() if v is not None},
- ):
+ invoke_kwargs = {
+ "messages": message_history,
+ "metadata": {k: str(v) for k, v in metadata.items() if v is not None},
+ }
+ if resolved_max_completion_tokens is not None:
+ invoke_kwargs["max_completion_tokens"] = resolved_max_completion_tokens
+
+ async for response in azure_agent.invoke(**invoke_kwargs):
responses.append(response)
if not responses:
@@ -299,6 +317,7 @@ async def execute_foundry_agent(
"endpoint": endpoint,
"model": model_value,
"message_length": len(text or ""),
+ "max_completion_tokens": resolved_max_completion_tokens,
},
)
@@ -321,6 +340,7 @@ async def execute_new_foundry_agent(
global_settings: Dict[str, Any],
message_history: List[ChatMessageContent],
metadata: Dict[str, Any],
+ max_completion_tokens: Optional[int] = None,
) -> FoundryAgentInvocationResult:
"""Invoke the new Foundry application runtime through its Responses protocol endpoint."""
@@ -343,7 +363,12 @@ async def execute_new_foundry_agent(
f"{endpoint.rstrip('/')}/applications/{quote(application_name, safe='')}/"
"protocols/openai/responses"
)
- payload = _build_new_foundry_request_payload(message_history, metadata, stream=False)
+ payload = _build_new_foundry_request_payload(
+ message_history,
+ metadata,
+ stream=False,
+ max_output_tokens=_normalize_max_completion_tokens(max_completion_tokens),
+ )
headers = {
"Authorization": f"Bearer {token.token}",
"Content-Type": "application/json",
@@ -376,6 +401,7 @@ async def execute_new_foundry_agent(
"endpoint": endpoint,
"model": result.model,
"message_length": len(result.message),
+ "max_output_tokens": payload.get("max_output_tokens"),
},
)
@@ -390,6 +416,7 @@ async def execute_new_foundry_agent_stream(
global_settings: Dict[str, Any],
message_history: List[ChatMessageContent],
metadata: Dict[str, Any],
+ max_completion_tokens: Optional[int] = None,
) -> AsyncIterator[FoundryAgentStreamMessage]:
"""Stream a new Foundry application response through the Responses API."""
@@ -413,7 +440,12 @@ async def execute_new_foundry_agent_stream(
"protocols/openai/responses"
)
debug_print(f"Invoking new Foundry application '{application_name}' at {endpoint} with streaming to url {url} with api-version {responses_api_version}")
- payload = _build_new_foundry_request_payload(message_history, metadata, stream=True)
+ payload = _build_new_foundry_request_payload(
+ message_history,
+ metadata,
+ stream=True,
+ max_output_tokens=_normalize_max_completion_tokens(max_completion_tokens),
+ )
headers = {
"Authorization": f"Bearer {token.token}",
"Content-Type": "application/json",
@@ -692,6 +724,7 @@ def _build_new_foundry_request_payload(
message_history: List[ChatMessageContent],
metadata: Dict[str, Any],
stream: bool = False,
+ max_output_tokens: Optional[int] = None,
) -> Dict[str, Any]:
input_items: List[Dict[str, Any]] = []
for message in message_history:
@@ -733,6 +766,8 @@ def _build_new_foundry_request_payload(
}
if normalized_metadata:
payload["metadata"] = normalized_metadata
+ if max_output_tokens is not None:
+ payload["max_output_tokens"] = max_output_tokens
return payload
diff --git a/application/single_app/functions_authentication.py b/application/single_app/functions_authentication.py
index 8bdf4b5c..a0ecde0a 100644
--- a/application/single_app/functions_authentication.py
+++ b/application/single_app/functions_authentication.py
@@ -385,7 +385,7 @@ def get_video_indexer_managed_identity_token(settings, video_id=None):
rg = settings["video_indexer_resource_group"]
sub = settings["video_indexer_subscription_id"]
acct = settings["video_indexer_account_name"]
- api_ver = settings.get("video_indexer_arm_api_version", "2021-11-10-preview")
+ api_ver = settings.get("video_indexer_arm_api_version", DEFAULT_VIDEO_INDEXER_ARM_API_VERSION)
debug_print(f"[VIDEO INDEXER AUTH] Settings extracted - Subscription: {sub}, Resource Group: {rg}, Account: {acct}, API Version: {api_ver}")
diff --git a/application/single_app/functions_documents.py b/application/single_app/functions_documents.py
index 7bff48d8..7c6e4a27 100644
--- a/application/single_app/functions_documents.py
+++ b/application/single_app/functions_documents.py
@@ -94,6 +94,27 @@ def get_document_blob_storage_info(document_item, user_id=None, group_id=None, p
)
+def _has_persisted_blob_reference(document_item):
+ if not document_item:
+ return False
+
+ if document_item.get("blob_path"):
+ return True
+
+ return (
+ document_item.get("blob_path_mode") == ARCHIVED_REVISION_BLOB_PATH_MODE
+ and bool(document_item.get("archived_blob_path"))
+ )
+
+
+def _normalize_document_enhanced_citations(document_item):
+ if not document_item:
+ return document_item
+
+ document_item["enhanced_citations"] = _has_persisted_blob_reference(document_item)
+ return document_item
+
+
def get_document_blob_delete_targets(document_item, user_id=None, group_id=None, public_workspace_id=None):
targets = []
seen = set()
@@ -317,7 +338,9 @@ def select_current_documents(documents):
current_documents = []
for family_documents in families.values():
- current_documents.append(_choose_current_document(family_documents))
+ current_documents.append(
+ _normalize_document_enhanced_citations(_choose_current_document(family_documents))
+ )
return current_documents
@@ -666,6 +689,7 @@ def create_document(file_name, user_id, document_id, num_file_chunks, status, gr
"status": status,
"percentage_complete": 0,
"document_classification": carried_forward.get("document_classification", "None"),
+ "enhanced_citations": False,
"type": "document_metadata",
"public_workspace_id": public_workspace_id,
"user_id": user_id,
@@ -697,6 +721,7 @@ def create_document(file_name, user_id, document_id, num_file_chunks, status, gr
"status": status,
"percentage_complete": 0,
"document_classification": carried_forward.get("document_classification", "None"),
+ "enhanced_citations": False,
"type": "document_metadata",
"group_id": group_id,
"blob_container": _get_blob_container_name(group_id=group_id),
@@ -728,6 +753,7 @@ def create_document(file_name, user_id, document_id, num_file_chunks, status, gr
"status": status,
"percentage_complete": 0,
"document_classification": carried_forward.get("document_classification", "None"),
+ "enhanced_citations": False,
"type": "document_metadata",
"user_id": user_id,
"blob_container": _get_blob_container_name(),
@@ -823,7 +849,7 @@ def get_document_metadata(document_id, user_id, group_id=None, public_workspace_
user_id=public_workspace_id if is_public_workspace else (group_id if is_group else user_id),
content=f"Document metadata retrieved: {document_items}."
)
- return document_items[0] if document_items else None
+ return _normalize_document_enhanced_citations(document_items[0]) if document_items else None
except Exception as e:
print(f"Error retrieving document metadata: {repr(e)}\nTraceback:\n{traceback.format_exc()}")
@@ -2775,7 +2801,7 @@ def get_document(user_id, document_id, group_id=None, public_workspace_id=None):
if not document_results:
return jsonify({'error': 'Document not found or access denied'}), 404
- return jsonify(document_results[0]), 200
+ return jsonify(_normalize_document_enhanced_citations(document_results[0])), 200
except Exception as e:
return jsonify({'error': f'Error retrieving document: {str(e)}'}), 500
@@ -2863,7 +2889,7 @@ def get_document_version(user_id, document_id, version, group_id=None, public_wo
if not document_results:
return jsonify({'error': 'Document version not found'}), 404
- return jsonify(document_results[0]), 200
+ return jsonify(_normalize_document_enhanced_citations(document_results[0])), 200
except Exception as e:
return jsonify({'error': f'Error retrieving document version: {str(e)}'}), 500
@@ -4158,6 +4184,7 @@ def upload_to_blob(temp_file_path, user_id, document_id, blob_filename, update_c
current_document["blob_container"] = storage_account_container_name
current_document["blob_path"] = blob_path
current_document["blob_path_mode"] = CURRENT_ALIAS_BLOB_PATH_MODE
+ current_document["enhanced_citations"] = True
if current_document.get("archived_blob_path") is None:
current_document["archived_blob_path"] = None
cosmos_container.upsert_item(current_document)
@@ -6242,6 +6269,34 @@ def _get_speech_config(settings, endpoint: str, locale: str):
print(f"[Debug] Speech config obtained successfully", flush=True)
return speech_config
+
+def get_speech_synthesis_config(settings, endpoint: str, location: str):
+ """Get speech synthesis config for either key or managed identity auth."""
+ auth_type = settings.get("speech_service_authentication_type")
+
+ if auth_type == "managed_identity":
+ resource_id = (settings.get("speech_service_resource_id") or "").strip()
+ if not location:
+ raise ValueError("Speech service location is required for text-to-speech with managed identity.")
+ if not resource_id:
+ raise ValueError("Speech service resource ID is required for text-to-speech with managed identity.")
+
+ credential = DefaultAzureCredential()
+ token = credential.get_token(cognitive_services_scope)
+ authorization_token = f"aad#{resource_id}#{token.token}"
+ speech_config = speechsdk.SpeechConfig(auth_token=authorization_token, region=location)
+ else:
+ key = (settings.get("speech_service_key") or "").strip()
+ if not endpoint:
+ raise ValueError("Speech service endpoint is required for text-to-speech.")
+ if not key:
+ raise ValueError("Speech service key is required for text-to-speech when using key authentication.")
+
+ speech_config = speechsdk.SpeechConfig(endpoint=endpoint, subscription=key)
+
+ print(f"[Debug] Speech synthesis config obtained successfully", flush=True)
+ return speech_config
+
def process_audio_document(
document_id: str,
user_id: str,
diff --git a/application/single_app/functions_global_agents.py b/application/single_app/functions_global_agents.py
index 7fecf1ee..51870b9c 100644
--- a/application/single_app/functions_global_agents.py
+++ b/application/single_app/functions_global_agents.py
@@ -51,7 +51,7 @@ def ensure_default_global_agent_exists():
),
"actions_to_load": [],
"other_settings": {},
- "max_completion_tokens": 4096
+ "max_completion_tokens": -1
}
save_global_agent(default_agent)
log_event(
diff --git a/application/single_app/functions_settings.py b/application/single_app/functions_settings.py
index 324f82fc..8d09ee61 100644
--- a/application/single_app/functions_settings.py
+++ b/application/single_app/functions_settings.py
@@ -372,6 +372,10 @@ def get_settings(use_cosmos=False, include_source=False):
# Audio file settings with Azure speech service
"speech_service_endpoint": '',
"speech_service_location": '',
+ "speech_service_subscription_id": '',
+ "speech_service_resource_group": '',
+ "speech_service_resource_name": '',
+ "speech_service_resource_id": '',
"speech_service_locale": "en-US",
"speech_service_key": "",
"speech_service_authentication_type": "key", # 'key' or 'managed_identity'
diff --git a/application/single_app/route_backend_chats.py b/application/single_app/route_backend_chats.py
index c6e99a62..e16d7242 100644
--- a/application/single_app/route_backend_chats.py
+++ b/application/single_app/route_backend_chats.py
@@ -3890,6 +3890,15 @@ def is_tabular_access_limited_analysis(analysis_text):
'do not have direct access',
"don't have",
'do not have',
+ "doesn't include the full",
+ 'does not include the full',
+ 'only sample rows',
+ 'only workbook metadata',
+ 'only sample rows and workbook metadata',
+ 'cannot accurately list all',
+ 'cannot accurately list them',
+ 'from the current evidence',
+ 'from the evidence provided',
'visible excerpt you provided',
'if those tool-backed results exist',
'allow me to query again',
@@ -3898,6 +3907,80 @@ def is_tabular_access_limited_analysis(analysis_text):
return any(phrase in normalized_analysis for phrase in inaccessible_phrases)
+def get_tabular_result_coverage_summary(invocations):
+ """Return whether successful analytical tool calls produced full or partial result coverage."""
+ coverage_summary = {
+ 'has_full_result_coverage': False,
+ 'has_partial_result_coverage': False,
+ }
+
+ for invocation in invocations or []:
+ result_payload = get_tabular_invocation_result_payload(invocation) or {}
+
+ total_matches = parse_tabular_result_count(result_payload.get('total_matches'))
+ returned_rows = parse_tabular_result_count(result_payload.get('returned_rows'))
+ if total_matches is not None and returned_rows is not None:
+ if returned_rows >= total_matches:
+ coverage_summary['has_full_result_coverage'] = True
+ else:
+ coverage_summary['has_partial_result_coverage'] = True
+
+ distinct_count = parse_tabular_result_count(result_payload.get('distinct_count'))
+ returned_values = parse_tabular_result_count(result_payload.get('returned_values'))
+ if distinct_count is not None and returned_values is not None:
+ if returned_values >= distinct_count:
+ coverage_summary['has_full_result_coverage'] = True
+ else:
+ coverage_summary['has_partial_result_coverage'] = True
+
+ if result_payload.get('full_rows_included') or result_payload.get('full_values_included'):
+ coverage_summary['has_full_result_coverage'] = True
+ if result_payload.get('sample_rows_limited') or result_payload.get('values_limited'):
+ coverage_summary['has_partial_result_coverage'] = True
+
+ if (
+ coverage_summary['has_full_result_coverage']
+ and coverage_summary['has_partial_result_coverage']
+ ):
+ break
+
+ return coverage_summary
+
+
+def build_tabular_success_execution_gap_messages(user_question, analysis_text, invocations):
+ """Return retry guidance when a successful tabular analysis still produced an incomplete answer."""
+ coverage_summary = get_tabular_result_coverage_summary(invocations)
+ has_full_result_coverage = coverage_summary['has_full_result_coverage']
+ has_partial_result_coverage = coverage_summary['has_partial_result_coverage']
+ wants_exhaustive_results = question_requests_tabular_exhaustive_results(user_question)
+ execution_gap_messages = []
+
+ if is_tabular_access_limited_analysis(analysis_text):
+ if wants_exhaustive_results and has_full_result_coverage:
+ execution_gap_messages.append(
+ 'Previous attempt still claimed only sample rows or workbook metadata were available even though successful analytical tool calls returned the full matching result set. Answer directly from those returned rows and list the full results the user asked for.'
+ )
+ elif has_full_result_coverage:
+ execution_gap_messages.append(
+ 'Previous attempt still claimed the requested data was unavailable even though successful analytical tool calls returned the full matching result set. Use the returned rows and answer directly.'
+ )
+ else:
+ execution_gap_messages.append(
+ 'Previous attempt still claimed the requested data was unavailable even though analytical tool calls succeeded. Use the returned rows and answer directly.'
+ )
+
+ if (
+ wants_exhaustive_results
+ and has_partial_result_coverage
+ and not has_full_result_coverage
+ ):
+ execution_gap_messages.append(
+ 'The user asked for a full list, but previous analytical calls returned only a partial slice. Rerun the relevant analytical call with a higher max_rows or max_values before answering.'
+ )
+
+ return execution_gap_messages
+
+
def _select_likely_workbook_sheet(sheet_names, question_text, per_sheet=None, score_match_fn=None):
"""Return a likely sheet name when the user question strongly matches one sheet."""
score_match_fn = score_match_fn or _score_tabular_sheet_match
@@ -4408,7 +4491,8 @@ def build_system_prompt(force_tool_use=False, tool_error_messages=None,
"12. Summarize concrete found records sheet-by-sheet using the tool results, not schema placeholders.\n"
"13. For count or percentage questions involving a cohort defined on one sheet and facts on another, prefer get_distinct_values, count_rows, filter_rows_by_related_values, or count_rows_by_related_values over manually counting sampled rows.\n"
"14. Use normalize_match=true when matching names, owners, assignees, engineers, or similar entity-text columns across worksheets.\n"
- "15. Do not mention hypothetical follow-up analyses, parser errors, or failed attempts unless the user explicitly asked about failures and you have actual tool error output to report."
+ "15. If a successful tool result reports returned_rows == total_matches or returned_values == distinct_count, treat that as the full matching result set. Do not claim that only sample rows or workbook metadata are available in that case.\n"
+ "16. Do not mention hypothetical follow-up analyses, parser errors, or failed attempts unless the user explicitly asked about failures and you have actual tool error output to report."
)
return (
@@ -4461,8 +4545,9 @@ def build_system_prompt(force_tool_use=False, tool_error_messages=None,
"22. For identifier-based workbook questions, locate the identifier on the correct sheet before explaining downstream calculations.\n"
"23. For peak, busiest, highest, or lowest questions, use grouped functions and inspect the highest_group, highest_value, lowest_group, and lowest_value summary fields.\n"
"24. Return only computed findings and name the strongest drivers clearly.\n"
- "25. Do not mention hypothetical follow-up analyses, parser errors, or failed attempts unless the user explicitly asked about failures and you have actual tool error output to report.\n"
- "26. When using query_tabular_data, use simple DataFrame.query() syntax with backticked column names for columns containing spaces. Avoid method calls such as .str.lower(), .astype(...), or other Python expressions that DataFrame.query() may reject."
+ "25. If a successful tool result reports returned_rows == total_matches or returned_values == distinct_count, treat that as the full matching result set. Do not claim that only sample rows or workbook metadata are available in that case.\n"
+ "26. Do not mention hypothetical follow-up analyses, parser errors, or failed attempts unless the user explicitly asked about failures and you have actual tool error output to report.\n"
+ "27. When using query_tabular_data, use simple DataFrame.query() syntax with backticked column names for columns containing spaces. Avoid method calls such as .str.lower(), .astype(...), or other Python expressions that DataFrame.query() may reject."
)
baseline_invocations = plugin_logger.get_invocations_for_conversation(
@@ -4631,10 +4716,19 @@ def build_system_prompt(force_tool_use=False, tool_error_messages=None,
previous_tool_error_messages = []
previous_failed_call_parameters = []
previous_discovery_feedback_messages = []
+ execution_gap_messages = []
+ selected_sheets = []
+ coverage_summary = get_tabular_result_coverage_summary(
+ successful_analytical_invocations
+ )
+ retry_gap_messages = build_tabular_success_execution_gap_messages(
+ user_question,
+ analysis,
+ successful_analytical_invocations,
+ )
if entity_lookup_mode:
selected_sheets = get_tabular_invocation_selected_sheets(successful_analytical_invocations)
- execution_gap_messages = []
# Cross-sheet results ("ALL (cross-sheet search)") already span
# the entire workbook β no execution gap for sheet coverage.
@@ -4648,24 +4742,24 @@ def build_system_prompt(force_tool_use=False, tool_error_messages=None,
f"Previous attempt only queried worksheet(s): {rendered_selected_sheets}. The question asks for related records across worksheets, so query additional relevant sheets explicitly with sheet_name."
)
- if is_tabular_access_limited_analysis(analysis):
- execution_gap_messages.append(
- 'Previous attempt still claimed the requested data was unavailable even though analytical tool calls succeeded. Use the returned rows and answer directly.'
- )
+ execution_gap_messages.extend(retry_gap_messages)
- if execution_gap_messages and attempt_number < 3:
- previous_execution_gap_messages = execution_gap_messages
- log_event(
- f"[Tabular SK Analysis] Attempt {attempt_number} entity lookup was incomplete despite successful tool calls; retrying",
- extra={
- 'selected_sheets': selected_sheets,
- 'execution_gaps': previous_execution_gap_messages,
- 'successful_tool_count': len(successful_analytical_invocations),
- },
- level=logging.WARNING,
- )
- baseline_invocation_count = len(invocations_after)
- continue
+ if execution_gap_messages and attempt_number < 3:
+ previous_execution_gap_messages = execution_gap_messages
+ log_event(
+ f"[Tabular SK Analysis] Attempt {attempt_number} analysis was incomplete despite successful tool calls; retrying",
+ extra={
+ 'selected_sheets': selected_sheets,
+ 'execution_gaps': previous_execution_gap_messages,
+ 'successful_tool_count': len(successful_analytical_invocations),
+ 'has_full_result_coverage': coverage_summary.get('has_full_result_coverage', False),
+ 'has_partial_result_coverage': coverage_summary.get('has_partial_result_coverage', False),
+ 'entity_lookup_mode': entity_lookup_mode,
+ },
+ level=logging.WARNING,
+ )
+ baseline_invocation_count = len(invocations_after)
+ continue
previous_execution_gap_messages = []
log_event(
diff --git a/application/single_app/route_backend_tts.py b/application/single_app/route_backend_tts.py
index 11d14cc3..61830490 100644
--- a/application/single_app/route_backend_tts.py
+++ b/application/single_app/route_backend_tts.py
@@ -2,6 +2,8 @@
from config import *
from functions_authentication import *
+from functions_appinsights import log_event
+from functions_documents import get_speech_synthesis_config
from functions_settings import *
from functions_debug import debug_print
from swagger_wrapper import swagger_route, get_auth_security
@@ -41,14 +43,26 @@ def synthesize_speech():
return jsonify({"error": "Text-to-speech is not enabled"}), 403
# Validate speech service configuration
- speech_key = settings.get('speech_service_key', '')
- speech_region = settings.get('speech_service_location', '')
+ speech_endpoint = (settings.get('speech_service_endpoint') or '').strip().rstrip('/')
+ speech_region = (settings.get('speech_service_location') or '').strip()
+ speech_auth_type = settings.get('speech_service_authentication_type', 'key')
- if not speech_key or not speech_region:
- debug_print("[TTS] Speech service not configured - missing key or region")
+ if not speech_endpoint:
+ debug_print("[TTS] Speech service not configured - missing endpoint")
+ return jsonify({"error": "Speech service not configured"}), 500
+
+ if speech_auth_type == 'key' and not (settings.get('speech_service_key') or '').strip():
+ debug_print("[TTS] Speech service not configured - missing key for key authentication")
+ return jsonify({"error": "Speech service not configured"}), 500
+
+ if speech_auth_type == 'managed_identity' and not speech_region:
+ debug_print("[TTS] Speech service not configured - missing location for managed identity")
return jsonify({"error": "Speech service not configured"}), 500
- debug_print(f"[TTS] Speech service configured - region: {speech_region}")
+ debug_print(
+ f"[TTS] Speech service configured - auth_type: {speech_auth_type}, "
+ f"endpoint: {speech_endpoint}, location: {speech_region or 'n/a'}"
+ )
# Parse request data
data = request.get_json()
@@ -71,10 +85,12 @@ def synthesize_speech():
debug_print(f"[TTS] Request params - voice: {voice}, speed: {speed}, text_length: {len(text)}")
# Configure speech service
- speech_config = speechsdk.SpeechConfig(
- subscription=speech_key,
- region=speech_region
- )
+ try:
+ speech_config = get_speech_synthesis_config(settings, speech_endpoint, speech_region)
+ except ValueError as config_error:
+ debug_print(f"[TTS] Speech service configuration invalid: {str(config_error)}")
+ return jsonify({"error": str(config_error)}), 500
+
speech_config.speech_synthesis_voice_name = voice
# Set output format to high quality
diff --git a/application/single_app/route_enhanced_citations.py b/application/single_app/route_enhanced_citations.py
index 29de8313..ca1b9e48 100644
--- a/application/single_app/route_enhanced_citations.py
+++ b/application/single_app/route_enhanced_citations.py
@@ -12,7 +12,7 @@
from functions_authentication import login_required, user_required, get_current_user_id
from functions_settings import get_settings, enabled_required
-from functions_documents import get_document_metadata, get_document_blob_storage_info
+from functions_documents import get_document_metadata
from functions_group import get_user_groups
from functions_public_workspaces import get_user_visible_public_workspace_ids_from_settings
from swagger_wrapper import swagger_route, get_auth_security
@@ -90,15 +90,13 @@ def get_enhanced_citation_document_metadata():
return doc_response, status_code
raw_doc = doc_response.get_json()
- _, blob_path = get_document_blob_storage_info(raw_doc)
-
return jsonify({
"id": raw_doc.get("id"),
"document_id": raw_doc.get("id"),
"file_name": raw_doc.get("file_name"),
"version": raw_doc.get("version"),
"is_current_version": raw_doc.get("is_current_version"),
- "enhanced_citations": bool(blob_path),
+ "enhanced_citations": bool(raw_doc.get("enhanced_citations", False)),
}), 200
except Exception as e:
diff --git a/application/single_app/route_frontend_admin_settings.py b/application/single_app/route_frontend_admin_settings.py
index 94053752..129dfcde 100644
--- a/application/single_app/route_frontend_admin_settings.py
+++ b/application/single_app/route_frontend_admin_settings.py
@@ -367,6 +367,9 @@ def admin_settings():
'admin_settings.html',
app_settings=settings_for_template,
settings=settings_for_template,
+ azure_environment=AZURE_ENVIRONMENT,
+ default_video_indexer_endpoint=video_indexer_endpoint,
+ default_video_indexer_arm_api_version=DEFAULT_VIDEO_INDEXER_ARM_API_VERSION,
user_settings=user_settings,
update_available=update_available,
latest_version=latest_version,
@@ -1325,12 +1328,16 @@ def is_valid_url(url):
'video_indexer_resource_group': form_data.get('video_indexer_resource_group', '').strip(),
'video_indexer_subscription_id': form_data.get('video_indexer_subscription_id', '').strip(),
'video_indexer_account_name': form_data.get('video_indexer_account_name', '').strip(),
- 'video_indexer_arm_api_version': form_data.get('video_indexer_arm_api_version', '2024-01-01').strip(),
+ 'video_indexer_arm_api_version': form_data.get('video_indexer_arm_api_version', DEFAULT_VIDEO_INDEXER_ARM_API_VERSION).strip(),
'video_index_timeout': int(form_data.get('video_index_timeout', 600)),
# Audio file settings with Azure speech service
'speech_service_endpoint': form_data.get('speech_service_endpoint', '').strip(),
'speech_service_location': form_data.get('speech_service_location', '').strip(),
+ 'speech_service_subscription_id': form_data.get('speech_service_subscription_id', '').strip(),
+ 'speech_service_resource_group': form_data.get('speech_service_resource_group', '').strip(),
+ 'speech_service_resource_name': form_data.get('speech_service_resource_name', '').strip(),
+ 'speech_service_resource_id': form_data.get('speech_service_resource_id', '').strip(),
'speech_service_locale': form_data.get('speech_service_locale', '').strip(),
'speech_service_authentication_type': form_data.get('speech_service_authentication_type', 'key'),
'speech_service_key': form_data.get('speech_service_key', '').strip(),
diff --git a/application/single_app/static/js/admin/admin_settings.js b/application/single_app/static/js/admin/admin_settings.js
index 896bf6b3..7861b801 100644
--- a/application/single_app/static/js/admin/admin_settings.js
+++ b/application/single_app/static/js/admin/admin_settings.js
@@ -1994,14 +1994,152 @@ function setupToggles() {
}
const speechAuthType = document.getElementById('speech_service_authentication_type');
+ const speechEndpointInput = document.getElementById('speech_service_endpoint');
+ const speechKeyContainer = document.getElementById('speech_service_key_container');
+ const speechResourceIdContainer = document.getElementById('speech_service_resource_id_container');
+ const speechResourceIdInput = document.getElementById('speech_service_resource_id');
+ const speechSubscriptionInput = document.getElementById('speech_service_subscription_id');
+ const speechResourceGroupInput = document.getElementById('speech_service_resource_group');
+ const speechResourceNameInput = document.getElementById('speech_service_resource_name');
+ const buildSpeechResourceIdButton = document.getElementById('build_speech_resource_id_btn');
+ const speechResourceIdBuilderStatus = document.getElementById('speech_resource_id_builder_status');
+
+ function inferSpeechResourceNameFromEndpoint(endpointValue) {
+ const trimmedValue = (endpointValue || '').trim();
+ if (!trimmedValue) {
+ return '';
+ }
+
+ try {
+ const parsedUrl = new URL(trimmedValue);
+ const hostName = parsedUrl.hostname.toLowerCase();
+ const supportedSuffixes = [
+ '.cognitiveservices.azure.com',
+ '.cognitiveservices.azure.us'
+ ];
+
+ for (const suffix of supportedSuffixes) {
+ if (hostName.endsWith(suffix)) {
+ const resourceName = hostName.slice(0, -suffix.length);
+ if (resourceName && !resourceName.includes('.')) {
+ return resourceName;
+ }
+ }
+ }
+ } catch (error) {
+ return '';
+ }
+
+ return '';
+ }
+
+ function setSpeechResourceIdBuilderStatus(message) {
+ if (speechResourceIdBuilderStatus) {
+ speechResourceIdBuilderStatus.textContent = message;
+ }
+ }
+
+ function buildSpeechResourceIdFromFields() {
+ const subscriptionId = speechSubscriptionInput?.value?.trim() || '';
+ const resourceGroup = speechResourceGroupInput?.value?.trim() || '';
+ const resourceName = speechResourceNameInput?.value?.trim() || '';
+
+ if (!subscriptionId || !resourceGroup || !resourceName) {
+ return '';
+ }
+
+ return `/subscriptions/${subscriptionId}/resourceGroups/${resourceGroup}/providers/Microsoft.CognitiveServices/accounts/${resourceName}`;
+ }
+
+ function syncSpeechResourceIdBuilder(force) {
+ if (!speechResourceIdInput) {
+ return '';
+ }
+
+ if (speechResourceNameInput && !speechResourceNameInput.value.trim()) {
+ const inferredResourceName = inferSpeechResourceNameFromEndpoint(speechEndpointInput?.value || '');
+ if (inferredResourceName) {
+ speechResourceNameInput.value = inferredResourceName;
+ }
+ }
+
+ const builtResourceId = buildSpeechResourceIdFromFields();
+ const currentValue = speechResourceIdInput.value.trim();
+ const previousGeneratedValue = speechResourceIdInput.dataset.generatedValue || '';
+ const wasGenerated = speechResourceIdInput.dataset.generated === 'true' || currentValue === '' || currentValue === previousGeneratedValue;
+
+ if (builtResourceId) {
+ speechResourceIdInput.dataset.generatedValue = builtResourceId;
+ if (force || wasGenerated) {
+ speechResourceIdInput.value = builtResourceId;
+ speechResourceIdInput.dataset.generated = 'true';
+ }
+ setSpeechResourceIdBuilderStatus('Resource ID can be generated from the helper fields. You can still override it manually if needed.');
+ return builtResourceId;
+ }
+
+ const missingParts = [];
+ if (!speechSubscriptionInput?.value?.trim()) {
+ missingParts.push('Subscription ID');
+ }
+ if (!speechResourceGroupInput?.value?.trim()) {
+ missingParts.push('Resource Group');
+ }
+ if (!speechResourceNameInput?.value?.trim()) {
+ missingParts.push('Speech Resource Name');
+ }
+
+ speechResourceIdInput.dataset.generatedValue = '';
+ if (speechResourceIdInput.dataset.generated === 'true' && !currentValue) {
+ speechResourceIdInput.dataset.generated = 'false';
+ }
+
+ setSpeechResourceIdBuilderStatus(`To auto-build the resource ID, provide: ${missingParts.join(', ')}.`);
+ return '';
+ }
+
if (speechAuthType) {
+ const updateSpeechAuthFields = function () {
+ const usingKeyAuth = this.value === 'key';
+ setSectionVisibility(speechKeyContainer, usingKeyAuth);
+ setSectionVisibility(speechResourceIdContainer, !usingKeyAuth);
+ };
+
+ updateSpeechAuthFields.call(speechAuthType);
speechAuthType.addEventListener('change', function () {
- document.getElementById('speech_service_key_container').style.display =
- (this.value === 'key') ? 'block' : 'none';
+ updateSpeechAuthFields.call(this);
markFormAsModified();
});
}
+ if (speechResourceIdInput) {
+ syncSpeechResourceIdBuilder(false);
+ speechResourceIdInput.addEventListener('input', function () {
+ const builtResourceId = buildSpeechResourceIdFromFields();
+ this.dataset.generated = builtResourceId && this.value.trim() === builtResourceId ? 'true' : 'false';
+ });
+ }
+
+ [speechEndpointInput, speechSubscriptionInput, speechResourceGroupInput, speechResourceNameInput].forEach((element) => {
+ if (!element) {
+ return;
+ }
+
+ element.addEventListener('input', () => {
+ syncSpeechResourceIdBuilder(false);
+ markFormAsModified();
+ });
+ });
+
+ if (buildSpeechResourceIdButton) {
+ buildSpeechResourceIdButton.addEventListener('click', () => {
+ const builtResourceId = syncSpeechResourceIdBuilder(true);
+ if (builtResourceId) {
+ markFormAsModified();
+ }
+ });
+ }
+
const officeAuthType = document.getElementById('office_docs_authentication_type');
const connStrGroup = document.getElementById('office_docs_storage_conn_str_group');
const urlGroup = document.getElementById('office_docs_storage_url_group');
@@ -3434,29 +3572,104 @@ function togglePassword(btnId, inputId) {
}
}
+function setSectionVisibility(element, visible) {
+ if (!element) {
+ return;
+ }
+
+ element.classList.toggle('d-none', !visible);
+}
+
// --- Video Indexer Settings toggle ---
const videoSupportToggle = document.getElementById('enable_video_file_support');
-const videoIndexerDiv = document.getElementById('video_indexer_settings');
+const videoIndexerDiv = document.getElementById('video_indexer_settings');
+const videoIndexerCloudSelect = document.getElementById('video_indexer_cloud');
+const videoIndexerEndpointInput = document.getElementById('video_indexer_endpoint');
+const videoIndexerEndpointDisplay = document.getElementById('video_indexer_endpoint_display');
+const videoIndexerCustomEndpointGroup = document.getElementById('video_indexer_custom_endpoint_group');
+const videoIndexerCustomEndpointInput = document.getElementById('video_indexer_custom_endpoint');
+const videoIndexerCloudMismatchAlert = document.getElementById('video_indexer_cloud_mismatch_alert');
+
+function updateVideoIndexerEndpointSelection() {
+ if (!videoIndexerCloudSelect || !videoIndexerEndpointInput) {
+ return;
+ }
+
+ const selectedCloud = videoIndexerCloudSelect.value;
+ const publicEndpoint = videoIndexerCloudSelect.dataset.publicEndpoint || 'https://api.videoindexer.ai';
+ const governmentEndpoint = videoIndexerCloudSelect.dataset.governmentEndpoint || 'https://api.videoindexer.ai.azure.us';
+ const runtimeCloud = videoIndexerCloudSelect.dataset.runtimeCloud || 'public';
+
+ let endpointValue = publicEndpoint;
+ if (selectedCloud === 'usgovernment') {
+ endpointValue = governmentEndpoint;
+ } else if (selectedCloud === 'custom') {
+ endpointValue = videoIndexerCustomEndpointInput?.value?.trim() || '';
+ }
+
+ videoIndexerEndpointInput.value = endpointValue;
+
+ if (videoIndexerEndpointDisplay) {
+ videoIndexerEndpointDisplay.value = endpointValue;
+ }
+
+ setSectionVisibility(videoIndexerCustomEndpointGroup, selectedCloud === 'custom');
+ setSectionVisibility(videoIndexerCloudMismatchAlert, selectedCloud !== runtimeCloud);
+
+ if (typeof updateVideoIndexerModalInfo === 'function') {
+ updateVideoIndexerModalInfo();
+ }
+}
+
if (videoSupportToggle && videoIndexerDiv) {
- // on load
- videoIndexerDiv.style.display = videoSupportToggle.checked ? 'block' : 'none';
- // on change
- videoSupportToggle.addEventListener('change', () => {
- videoIndexerDiv.style.display = videoSupportToggle.checked ? 'block' : 'none';
- markFormAsModified();
- });
+ setSectionVisibility(videoIndexerDiv, videoSupportToggle.checked);
+ videoSupportToggle.addEventListener('change', () => {
+ setSectionVisibility(videoIndexerDiv, videoSupportToggle.checked);
+ markFormAsModified();
+ });
+}
+
+if (videoIndexerCloudSelect) {
+ updateVideoIndexerEndpointSelection();
+ videoIndexerCloudSelect.addEventListener('change', () => {
+ updateVideoIndexerEndpointSelection();
+ markFormAsModified();
+ });
+}
+
+if (videoIndexerCustomEndpointInput) {
+ videoIndexerCustomEndpointInput.addEventListener('input', () => {
+ updateVideoIndexerEndpointSelection();
+ markFormAsModified();
+ });
}
// --- Speech Service Settings toggle ---
-const audioSupportToggle = document.getElementById('enable_audio_file_support');
-const audioServiceDiv = document.getElementById('audio_service_settings');
-if (audioSupportToggle && audioServiceDiv) {
- // initial visibility
- audioServiceDiv.style.display = audioSupportToggle.checked ? 'block' : 'none';
- audioSupportToggle.addEventListener('change', () => {
- audioServiceDiv.style.display = audioSupportToggle.checked ? 'block' : 'none';
- markFormAsModified();
- });
+const audioSupportToggle = document.getElementById('enable_audio_file_support');
+const speechToTextToggle = document.getElementById('enable_speech_to_text_input');
+const textToSpeechToggle = document.getElementById('enable_text_to_speech');
+const audioServiceDiv = document.getElementById('audio_service_settings');
+
+function areAnySpeechFeaturesEnabled() {
+ return [audioSupportToggle, speechToTextToggle, textToSpeechToggle].some((toggle) => Boolean(toggle?.checked));
+}
+
+function updateSpeechServiceSettingsVisibility() {
+ setSectionVisibility(audioServiceDiv, areAnySpeechFeaturesEnabled());
+}
+
+if (audioServiceDiv) {
+ updateSpeechServiceSettingsVisibility();
+ [audioSupportToggle, speechToTextToggle, textToSpeechToggle].forEach((toggle) => {
+ if (!toggle) {
+ return;
+ }
+
+ toggle.addEventListener('change', () => {
+ updateSpeechServiceSettingsVisibility();
+ markFormAsModified();
+ });
+ });
}
// Metadata Extraction UI
@@ -3495,12 +3708,12 @@ function populateExtractionModels() {
}
if (extractToggle) {
- // show/hide the model dropdown
- extractModelDiv.style.display = extractToggle.checked ? 'block' : 'none';
- extractToggle.addEventListener('change', () => {
+ // show/hide the model dropdown
extractModelDiv.style.display = extractToggle.checked ? 'block' : 'none';
- markFormAsModified();
- });
+ extractToggle.addEventListener('change', () => {
+ extractModelDiv.style.display = extractToggle.checked ? 'block' : 'none';
+ markFormAsModified();
+ });
}
// Multi-Modal Vision UI
@@ -3509,232 +3722,232 @@ const visionModelDiv = document.getElementById('multimodal_vision_model_settings
const visionSelect = document.getElementById('multimodal_vision_model');
function populateVisionModels() {
- if (!visionSelect) return;
+ if (!visionSelect) return;
- // remember previously chosen value
- const prev = visionSelect.getAttribute('data-prev') || '';
-
- // clear out old options (except the placeholder)
- visionSelect.innerHTML = '';
-
- if (document.getElementById('enable_gpt_apim').checked) {
- // use comma-separated APIM deployments
- const text = document.getElementById('azure_apim_gpt_deployment').value || '';
- text.split(',')
- .map(s => s.trim())
- .filter(s => s)
- .forEach(d => {
- const opt = new Option(d, d);
- visionSelect.add(opt);
+ // remember previously chosen value
+ const prev = visionSelect.getAttribute('data-prev') || '';
+
+ // clear out old options (except the placeholder)
+ visionSelect.innerHTML = '';
+
+ if (document.getElementById('enable_gpt_apim').checked) {
+ // use comma-separated APIM deployments
+ const text = document.getElementById('azure_apim_gpt_deployment').value || '';
+ text.split(',')
+ .map(s => s.trim())
+ .filter(s => s)
+ .forEach(d => {
+ const opt = new Option(d, d);
+ visionSelect.add(opt);
+ });
+ } else {
+ // use direct GPT selected deployments - filter for vision-capable models
+ (window.gptSelected || []).forEach(m => {
+ // Only include models with vision capabilities
+ // Vision-enabled models per Azure OpenAI docs:
+ // - o-series reasoning models (o1, o3, etc.)
+ // - GPT-5 series
+ // - GPT-4.1 series
+ // - GPT-4.5
+ // - GPT-4o series (gpt-4o, gpt-4o-mini)
+ // - GPT-4 vision models (gpt-4-vision, gpt-4-turbo-vision)
+ const modelNameLower = (m.modelName || '').toLowerCase();
+ const isVisionCapable =
+ modelNameLower.includes('vision') ||
+ modelNameLower.includes('gpt-4o') ||
+ modelNameLower.includes('gpt-4.1') ||
+ modelNameLower.includes('gpt-4.5') ||
+ modelNameLower.includes('gpt-5') ||
+ modelNameLower.match(/^o\d+/) ||
+ modelNameLower.includes('o1-') ||
+ modelNameLower.includes('o3-');
+
+ if (isVisionCapable) {
+ const label = `${m.deploymentName} (${m.modelName})`;
+ const opt = new Option(label, m.deploymentName);
+ visionSelect.add(opt);
+ }
});
- } else {
- // use direct GPT selected deployments - filter for vision-capable models
- (window.gptSelected || []).forEach(m => {
- // Only include models with vision capabilities
- // Vision-enabled models per Azure OpenAI docs:
- // - o-series reasoning models (o1, o3, etc.)
- // - GPT-5 series
- // - GPT-4.1 series
- // - GPT-4.5
- // - GPT-4o series (gpt-4o, gpt-4o-mini)
- // - GPT-4 vision models (gpt-4-vision, gpt-4-turbo-vision)
- const modelNameLower = (m.modelName || '').toLowerCase();
- const isVisionCapable =
- modelNameLower.includes('vision') || // gpt-4-vision, gpt-4-turbo-vision
- modelNameLower.includes('gpt-4o') || // gpt-4o, gpt-4o-mini
- modelNameLower.includes('gpt-4.1') || // gpt-4.1 series
- modelNameLower.includes('gpt-4.5') || // gpt-4.5
- modelNameLower.includes('gpt-5') || // gpt-5 series
- modelNameLower.match(/^o\d+/) || // o1, o3, etc. (o-series)
- modelNameLower.includes('o1-') || // o1-preview, o1-mini
- modelNameLower.includes('o3-'); // o3-mini, etc.
-
- if (isVisionCapable) {
- const label = `${m.deploymentName} (${m.modelName})`;
- const opt = new Option(label, m.deploymentName);
- visionSelect.add(opt);
- }
- });
- }
+ }
- // restore previous
- if (prev) {
- visionSelect.value = prev;
- }
+ // restore previous
+ if (prev) {
+ visionSelect.value = prev;
+ }
}
if (visionToggle && visionModelDiv) {
- // show/hide the model dropdown
- visionModelDiv.style.display = visionToggle.checked ? 'block' : 'none';
- visionToggle.addEventListener('change', () => {
+ // show/hide the model dropdown
visionModelDiv.style.display = visionToggle.checked ? 'block' : 'none';
- markFormAsModified();
- });
+ visionToggle.addEventListener('change', () => {
+ visionModelDiv.style.display = visionToggle.checked ? 'block' : 'none';
+ markFormAsModified();
+ });
}
// Listen for vision model selection changes
if (visionSelect) {
- visionSelect.addEventListener('change', () => {
- // Update data-prev to remember the selection
- visionSelect.setAttribute('data-prev', visionSelect.value);
- markFormAsModified();
- });
+ visionSelect.addEventListener('change', () => {
+ // Update data-prev to remember the selection
+ visionSelect.setAttribute('data-prev', visionSelect.value);
+ markFormAsModified();
+ });
}
-// when APIM–toggle flips, repopulate
+// when APIM-toggle flips, repopulate
const apimToggle = document.getElementById('enable_gpt_apim');
if (apimToggle) {
- apimToggle.addEventListener('change', () => {
- populateExtractionModels();
- populateVisionModels();
- });
+ apimToggle.addEventListener('change', () => {
+ populateExtractionModels();
+ populateVisionModels();
+ });
}
// on load, stash previous & populate
document.addEventListener('DOMContentLoaded', () => {
- if (extractSelect) {
- extractSelect.setAttribute('data-prev', extractSelect.value);
- populateExtractionModels();
- }
- if (visionSelect) {
- visionSelect.setAttribute('data-prev', visionSelect.value);
- populateVisionModels();
- }
+ if (extractSelect) {
+ extractSelect.setAttribute('data-prev', extractSelect.value);
+ populateExtractionModels();
+ }
+ if (visionSelect) {
+ visionSelect.setAttribute('data-prev', visionSelect.value);
+ populateVisionModels();
+ }
});
document.addEventListener('DOMContentLoaded', () => {
- ['user','group','public'].forEach(type => {
- const warnDiv = document.getElementById(`index-warning-${type}`);
- const missingSpan = document.getElementById(`missing-fields-${type}`);
- const fixBtn = document.getElementById(`fix-${type}-index-btn`);
+ ['user','group','public'].forEach(type => {
+ const warnDiv = document.getElementById(`index-warning-${type}`);
+ const missingSpan = document.getElementById(`missing-fields-${type}`);
+ const fixBtn = document.getElementById(`fix-${type}-index-btn`);
- // 1) check for missing fields
- fetch('/api/admin/settings/check_index_fields', {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json'
- },
- credentials: 'same-origin',
- body: JSON.stringify({ indexType: type })
- })
- .then(r => {
- if (!r.ok) {
- return r.json().then(errorData => {
- throw new Error(errorData.error || `HTTP ${r.status}: ${r.statusText}`);
- });
- }
- return r.json();
- })
- .then(response => {
- if (response.autoFixed) {
- // Fields were automatically fixed
-          console.log(`✅ Auto-fixed ${type} index: added ${response.fieldsAdded.length} field(s):`, response.fieldsAdded.join(', '));
- if (warnDiv) {
- warnDiv.className = 'alert alert-success';
- missingSpan.textContent = `Automatically added ${response.fieldsAdded.length} field(s): ${response.fieldsAdded.join(', ')}`;
- warnDiv.style.display = 'block';
- if (fixBtn) fixBtn.style.display = 'none';
-
- // Hide success message after 5 seconds
- setTimeout(() => {
- warnDiv.style.display = 'none';
- }, 5000);
- }
- } else if (response.autoFixFailed) {
- // Auto-fix failed, show manual button
- console.warn(`Auto-fix failed for ${type} index:`, response.error);
- missingSpan.textContent = response.missingFields.join(', ') + ' (Auto-fix failed - please fix manually)';
- warnDiv.className = 'alert alert-warning';
- warnDiv.style.display = 'block';
- if (fixBtn) {
- fixBtn.textContent = `Fix ${type} Index Fields`;
- fixBtn.style.display = 'inline-block';
- }
- } else if (response.missingFields && response.missingFields.length > 0) {
- // Missing fields but auto-fix was disabled
- missingSpan.textContent = response.missingFields.join(', ');
- warnDiv.className = 'alert alert-warning';
- warnDiv.style.display = 'block';
- if (fixBtn) {
- fixBtn.textContent = `Fix ${type} Index Fields`;
- fixBtn.style.display = 'inline-block';
- }
- } else if (response.indexExists) {
- // Index exists and is complete
- if (warnDiv) warnDiv.style.display = 'none';
- console.log(`${type} index is properly configured`);
- }
- })
- .catch(err => {
- console.warn(`Checking ${type} index fields:`, err.message);
+ // 1) check for missing fields
+ fetch('/api/admin/settings/check_index_fields', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ },
+ credentials: 'same-origin',
+ body: JSON.stringify({ indexType: type })
+ })
+ .then(r => {
+ if (!r.ok) {
+ return r.json().then(errorData => {
+ throw new Error(errorData.error || `HTTP ${r.status}: ${r.statusText}`);
+ });
+ }
+ return r.json();
+ })
+ .then(response => {
+ if (response.autoFixed) {
+ // Fields were automatically fixed
+        console.log(`✅ Auto-fixed ${type} index: added ${response.fieldsAdded.length} field(s):`, response.fieldsAdded.join(', '));
+ if (warnDiv) {
+ warnDiv.className = 'alert alert-success';
+ missingSpan.textContent = `Automatically added ${response.fieldsAdded.length} field(s): ${response.fieldsAdded.join(', ')}`;
+ warnDiv.style.display = 'block';
+ if (fixBtn) fixBtn.style.display = 'none';
+
+ // Hide success message after 5 seconds
+ setTimeout(() => {
+ warnDiv.style.display = 'none';
+ }, 5000);
+ }
+ } else if (response.autoFixFailed) {
+ // Auto-fix failed, show manual button
+ console.warn(`Auto-fix failed for ${type} index:`, response.error);
+ missingSpan.textContent = response.missingFields.join(', ') + ' (Auto-fix failed - please fix manually)';
+ warnDiv.className = 'alert alert-warning';
+ warnDiv.style.display = 'block';
+ if (fixBtn) {
+ fixBtn.textContent = `Fix ${type} Index Fields`;
+ fixBtn.style.display = 'inline-block';
+ }
+ } else if (response.missingFields && response.missingFields.length > 0) {
+ // Missing fields but auto-fix was disabled
+ missingSpan.textContent = response.missingFields.join(', ');
+ warnDiv.className = 'alert alert-warning';
+ warnDiv.style.display = 'block';
+ if (fixBtn) {
+ fixBtn.textContent = `Fix ${type} Index Fields`;
+ fixBtn.style.display = 'inline-block';
+ }
+ } else if (response.indexExists) {
+ // Index exists and is complete
+ if (warnDiv) warnDiv.style.display = 'none';
+ console.log(`${type} index is properly configured`);
+ }
+ })
+ .catch(err => {
+ console.warn(`Checking ${type} index fields:`, err.message);
- // Check if this is an index not found error
- if (err.message.includes('does not exist yet') || err.message.includes('not found')) {
- // Show a different message for missing index
- if (warnDiv && missingSpan && fixBtn) {
- missingSpan.textContent = `Index "${type}" does not exist yet`;
- warnDiv.style.display = 'block';
- fixBtn.textContent = `Create ${type} Index`;
- fixBtn.style.display = 'inline-block';
- fixBtn.dataset.action = 'create';
- }
- } else if (err.message.includes('not configured')) {
- // Azure AI Search not configured
- if (warnDiv && missingSpan) {
- missingSpan.textContent = 'Azure AI Search not configured';
- warnDiv.style.display = 'block';
- if (fixBtn) fixBtn.style.display = 'none';
- }
- } else {
- // Hide the warning div for other errors
- if (warnDiv) warnDiv.style.display = 'none';
- }
- });
+ // Check if this is an index not found error
+ if (err.message.includes('does not exist yet') || err.message.includes('not found')) {
+ // Show a different message for missing index
+ if (warnDiv && missingSpan && fixBtn) {
+ missingSpan.textContent = `Index "${type}" does not exist yet`;
+ warnDiv.style.display = 'block';
+ fixBtn.textContent = `Create ${type} Index`;
+ fixBtn.style.display = 'inline-block';
+ fixBtn.dataset.action = 'create';
+ }
+ } else if (err.message.includes('not configured')) {
+ // Azure AI Search not configured
+ if (warnDiv && missingSpan) {
+ missingSpan.textContent = 'Azure AI Search not configured';
+ warnDiv.style.display = 'block';
+ if (fixBtn) fixBtn.style.display = 'none';
+ }
+ } else {
+ // Hide the warning div for other errors
+ if (warnDiv) warnDiv.style.display = 'none';
+ }
+ });
-      // 2) wire up the “fix” button
- fixBtn.addEventListener('click', () => {
- fixBtn.disabled = true;
- const action = fixBtn.dataset.action || 'fix';
- const endpoint = action === 'create' ? '/api/admin/settings/create_index' : '/api/admin/settings/fix_index_fields';
- const actionText = action === 'create' ? 'Creating' : 'Fixing';
+ // 2) wire up the fix button
+ fixBtn.addEventListener('click', () => {
+ fixBtn.disabled = true;
+ const action = fixBtn.dataset.action || 'fix';
+ const endpoint = action === 'create' ? '/api/admin/settings/create_index' : '/api/admin/settings/fix_index_fields';
+ const actionText = action === 'create' ? 'Creating' : 'Fixing';
- fixBtn.textContent = `${actionText}...`;
+ fixBtn.textContent = `${actionText}...`;
- fetch(endpoint, {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json'
- },
- credentials: 'same-origin',
- body: JSON.stringify({ indexType: type })
- })
- .then(r => {
- if (!r.ok) {
- return r.json().then(errorData => {
- throw new Error(errorData.error || `HTTP ${r.status}: ${r.statusText}`);
+ fetch(endpoint, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ },
+ credentials: 'same-origin',
+ body: JSON.stringify({ indexType: type })
+ })
+ .then(r => {
+ if (!r.ok) {
+ return r.json().then(errorData => {
+ throw new Error(errorData.error || `HTTP ${r.status}: ${r.statusText}`);
+ });
+ }
+ return r.json();
+ })
+ .then(resp => {
+ if (resp.status === 'success') {
+ alert(resp.message || `Successfully ${action === 'create' ? 'created' : 'fixed'} ${type} index!`);
+ window.location.reload();
+ } else {
+ alert(`Failed to ${action} ${type} index: ${resp.error}`);
+ fixBtn.disabled = false;
+ fixBtn.textContent = `${action === 'create' ? 'Create' : 'Fix'} ${type} Index`;
+ }
+ })
+ .catch(err => {
+ alert(`Error ${action === 'create' ? 'creating' : 'fixing'} ${type} index: ${err.message || err}`);
+ fixBtn.disabled = false;
+ fixBtn.textContent = `${action === 'create' ? 'Create' : 'Fix'} ${type} Index`;
+ });
});
- }
- return r.json();
- })
- .then(resp => {
- if (resp.status === 'success') {
- alert(resp.message || `Successfully ${action === 'create' ? 'created' : 'fixed'} ${type} index!`);
- window.location.reload();
- } else {
- alert(`Failed to ${action} ${type} index: ${resp.error}`);
- fixBtn.disabled = false;
- fixBtn.textContent = `${action === 'create' ? 'Create' : 'Fix'} ${type} Index`;
- }
- })
- .catch(err => {
- alert(`Error ${action === 'create' ? 'creating' : 'fixing'} ${type} index: ${err.message || err}`);
- fixBtn.disabled = false;
- fixBtn.textContent = `${action === 'create' ? 'Create' : 'Fix'} ${type} Index`;
});
- });
});
- });
togglePassword('toggle_gpt_key', 'azure_openai_gpt_key');
@@ -3756,7 +3969,6 @@ togglePassword('toggle_audio_files_key', 'audio_files_key');
togglePassword('toggle_office_conn_str', 'office_docs_storage_account_blob_endpoint');
togglePassword('toggle_video_conn_str', 'video_files_storage_account_url');
togglePassword('toggle_audio_conn_str', 'audio_files_storage_account_url');
-togglePassword('toggle_video_indexer_api_key', 'video_indexer_api_key');
togglePassword('toggle_speech_service_key', 'speech_service_key');
togglePassword('toggle_redis_key', 'redis_key');
togglePassword('toggle_azure_apim_redis_subscription_key', 'azure_apim_redis_subscription_key');
@@ -4050,6 +4262,9 @@ function calculateAvailableWalkthroughSteps() {
const videoEnabled = document.getElementById('enable_video_file_support')?.checked || false;
const audioEnabled = document.getElementById('enable_audio_file_support')?.checked || false;
+ const speechToTextEnabled = document.getElementById('enable_speech_to_text_input')?.checked || false;
+ const textToSpeechEnabled = document.getElementById('enable_text_to_speech')?.checked || false;
+ const speechFeaturesEnabled = audioEnabled || speechToTextEnabled || textToSpeechEnabled;
const availableSteps = [1, 2, 3, 4]; // Base steps always available
@@ -4060,10 +4275,10 @@ function calculateAvailableWalkthroughSteps() {
if (videoEnabled) {
availableSteps.push(8); // Video support
}
-
- if (audioEnabled) {
- availableSteps.push(9); // Audio support
- }
+ }
+
+ if (speechFeaturesEnabled) {
+ availableSteps.push(9); // Shared Speech Service
}
// Optional steps always available
@@ -4123,8 +4338,10 @@ function findNextApplicableStep(currentStep) {
case 9: // Audio support
const audioEnabled = document.getElementById('enable_audio_file_support')?.checked || false;
- if (!workspacesEnabled || !audioEnabled) {
- // Skip this step if workspaces not enabled or audio not enabled
+ const speechToTextEnabled = document.getElementById('enable_speech_to_text_input')?.checked || false;
+ const textToSpeechEnabled = document.getElementById('enable_text_to_speech')?.checked || false;
+ if (!(audioEnabled || speechToTextEnabled || textToSpeechEnabled)) {
+ // Skip this step if no speech features are enabled
nextStep++;
continue;
}
@@ -4390,25 +4607,48 @@ function isStepComplete(stepNumber) {
const videoEndpoint = document.getElementById('video_indexer_endpoint')?.value;
const videoLocation = document.getElementById('video_indexer_location')?.value;
const videoAccountId = document.getElementById('video_indexer_account_id')?.value;
-
- return videoLocation && videoAccountId && videoEndpoint;
+ const videoResourceGroup = document.getElementById('video_indexer_resource_group')?.value;
+ const videoSubscriptionId = document.getElementById('video_indexer_subscription_id')?.value;
+ const videoAccountName = document.getElementById('video_indexer_account_name')?.value;
+
+ return Boolean(
+ videoLocation &&
+ videoAccountId &&
+ videoEndpoint &&
+ videoResourceGroup &&
+ videoSubscriptionId &&
+ videoAccountName
+ );
case 9: // Audio support
const audioEnabled = document.getElementById('enable_audio_file_support').checked || false;
+ const speechToTextEnabled = document.getElementById('enable_speech_to_text_input')?.checked || false;
+ const textToSpeechEnabled = document.getElementById('enable_text_to_speech')?.checked || false;
+ const speechFeaturesEnabled = audioEnabled || speechToTextEnabled || textToSpeechEnabled;
- // If workspaces not enabled or audio not enabled, it's always complete
- if (!workspacesEnabled || !audioEnabled) return true;
+ // If no speech features are enabled, it's always complete
+ if (!speechFeaturesEnabled) return true;
// Otherwise check settings
const speechEndpoint = document.getElementById('speech_service_endpoint')?.value;
const authType = document.getElementById('speech_service_authentication_type').value;
const key = document.getElementById('speech_service_key').value;
-
- if (!speechEndpoint || (authType === 'key' && !key)) {
- return false;
- } else {
- return true;
+ const speechLocation = document.getElementById('speech_service_location')?.value;
+ const speechResourceId = document.getElementById('speech_service_resource_id')?.value;
+
+ if (!speechEndpoint) {
+ return false;
}
+
+ if (authType === 'key') {
+ return Boolean(key);
+ }
+
+ if (textToSpeechEnabled) {
+ return Boolean(speechLocation && speechResourceId);
+ }
+
+ return true;
case 10: // Content safety - always complete (optional)
case 11: // User feedback and archiving - always complete (optional)
@@ -4608,14 +4848,26 @@ function setupWalkthroughFieldListeners() {
],
8: [ // Video settings
{selector: '#enable_video_file_support', event: 'change'},
+ {selector: '#video_indexer_cloud', event: 'change'},
+ {selector: '#video_indexer_custom_endpoint', event: 'input'},
{selector: '#video_indexer_location', event: 'input'},
{selector: '#video_indexer_account_id', event: 'input'},
- {selector: '#video_indexer_api_key', event: 'input'}
+ {selector: '#video_indexer_resource_group', event: 'input'},
+ {selector: '#video_indexer_subscription_id', event: 'input'},
+ {selector: '#video_indexer_account_name', event: 'input'}
],
9: [ // Audio settings
{selector: '#enable_audio_file_support', event: 'change'},
+ {selector: '#enable_speech_to_text_input', event: 'change'},
+ {selector: '#enable_text_to_speech', event: 'change'},
{selector: '#speech_service_endpoint', event: 'input'},
- {selector: '#speech_service_key', event: 'input'}
+ {selector: '#speech_service_authentication_type', event: 'change'},
+ {selector: '#speech_service_subscription_id', event: 'input'},
+ {selector: '#speech_service_resource_group', event: 'input'},
+ {selector: '#speech_service_resource_name', event: 'input'},
+ {selector: '#speech_service_key', event: 'input'},
+ {selector: '#speech_service_location', event: 'input'},
+ {selector: '#speech_service_resource_id', event: 'input'}
]
};
diff --git a/application/single_app/static/json/schemas/agent.schema.json b/application/single_app/static/json/schemas/agent.schema.json
index 64f91251..11f17de0 100644
--- a/application/single_app/static/json/schemas/agent.schema.json
+++ b/application/single_app/static/json/schemas/agent.schema.json
@@ -110,7 +110,7 @@
"type": "integer",
"minimum": -1,
"maximum": 512000,
- "default": 4096
+ "default": -1
}
},
"required": [
diff --git a/application/single_app/templates/_speech_service_info.html b/application/single_app/templates/_speech_service_info.html
new file mode 100644
index 00000000..fad60130
--- /dev/null
+++ b/application/single_app/templates/_speech_service_info.html
@@ -0,0 +1,330 @@
+
+
+
+
+
+
+
+ Azure AI Voice Conversations Configuration Guide
+
+
+
+
+
+
+ What is Azure AI Voice Conversations? In Simple Chat, Azure AI Speech powers three related experiences from one shared Speech resource: audio file transcription, live voice input, and spoken AI responses.
+
+
+
+
+ Managed identity note: For App Service deployments, managed identity is the recommended setup. When you choose managed identity, use the resource-specific custom-domain Speech endpoint and Speech-specific RBAC roles. If you also enable Voice Responses, provide the full Speech Resource ID.
+
+
+
+
+
Current Configuration
+
+
+
+
+ Audio Upload Enabled:
+ No
+
+
+ Voice Input Enabled:
+ No
+
+
+ Voice Responses Enabled:
+ No
+
+
+
+
+ Authentication Type:
+ Not configured
+
+
+ Speech Endpoint:
+ Not configured
+
+
+
+
+ Location:
+ Not configured
+
+
+ Locale:
+ Not configured
+
+
+ Speech Resource Name:
+ Not configured
+
+
+
+
+ Speech Resource ID:
+ Not configured
+
+
+ Subscription ID:
+ Not configured
+
+
+ Resource Group:
+ Not configured
+
+
+
+ Resource ID is only required when Voice Responses and Managed Identity are both enabled.
+
+
+
+
+
+
+
Create or Choose an Azure AI Speech Resource
+
+
+
+
Prerequisites
+
+
An Azure subscription
+
An Azure AI Speech resource in the region you want to use
+
Access to your App Service if you plan to use managed identity
+
+
+
+
+
1. Create or Reuse the Speech Resource
+
+
In the Azure portal, create or open an Azure AI Speech resource
+
Choose the region that is closest to your users and workload
+
Decide whether the app will use Managed Identity or Key authentication
+
Note the resource name, subscription ID, and resource group for the Admin Settings form
+
+
+
+
+ Shared-resource model: You do not need separate Speech resources for audio uploads, voice input, and voice responses unless you want to isolate costs or permissions.
+
+
+
+
+
+
+
Turn On the Custom Domain for Managed Identity
+
+
+
+ Required for managed identity: Microsoft Learn requires a custom domain name for Speech SDK Microsoft Entra authentication. This operation is not reversible, so use a test Speech resource first if your production resource already has a lot of Speech Studio assets.
+
+
+
+
Azure Portal Walkthrough
+
+
Open the Azure portal and select your Azure AI Speech resource.
+
In the left navigation under Resource Management, select Networking.
+
Open the Firewalls and virtual networks tab.
+
Select Generate Custom Domain Name.
+
Enter a globally unique custom domain name. The final endpoint will be https://<your-custom-name>.cognitiveservices.azure.com.
+
Select Save and wait for the update to finish.
+
Go to Keys and Endpoint and confirm the endpoint now starts with https://<your-custom-name>.cognitiveservices.azure.com.
+
Copy that endpoint into the Simple Chat Speech Service Endpoint field.
+
+
+
+
+
Optional Azure CLI Path
+
If you prefer CLI, select the correct subscription and turn on the custom domain directly:
+
az account set --subscription <subscription-id>
+az cognitiveservices account update --name <speech-resource-name> --resource-group <resource-group-name> --custom-domain <custom-domain-name>
+
Afterward, verify the endpoint on the Speech resource Keys and Endpoint page before saving it in Admin Settings.
+
+
+
+ What to avoid: Do not leave the endpoint set to a regional hostname like https://eastus.api.cognitive.microsoft.com when Managed Identity is selected. Use the generated custom-domain endpoint instead.
+
+
+
+
+
+
+
Configure Authentication
+
+
+
+
Option A: Managed Identity (Recommended on App Service)
+
+
Enable the system-assigned managed identity on your App Service
+
On the Speech resource, open Access control (IAM) and grant the App Service managed identity the Cognitive Services Speech User role
+
If fast transcription or other write-style Speech operations still fail, also grant Cognitive Services Speech Contributor
+
In Admin Settings, choose Managed Identity, paste the custom-domain endpoint, and set the location
+
If you enable Voice Responses, fill in the Speech Resource ID. The helper fields in Admin Settings can build it for you.
+
+
+
+
+ Role guidance: Speech relies heavily on data-plane permissions. Generic Owner, Contributor, and Cognitive Services Contributor do not provide the same effective Speech access as Cognitive Services Speech User or Cognitive Services Speech Contributor.
+
+
+
+
Option B: API Key
+
+
In the Speech resource, copy a key and the endpoint
+
In Admin Settings, choose Key, paste the endpoint, set the location, and provide the key
+
Speech Resource ID is not required for key-based authentication
+
+
+
+
+
+
+
+
Required Configuration Values
+
+
+
+
+
+
+
Field
+
Description
+
Example
+
Required
+
+
+
+
+
Speech Service Endpoint
+
Speech endpoint used by all enabled voice features
Speech Subscription ID / Resource Group / Resource Name
+
Optional helper fields that can build the Speech Resource ID for you
+
12345678-1234-1234-1234-123456789abc
+
Optional Helper
+
+
+
+
+
+
+
+
+
+
Important Notes
+
+
+
+
One Shared Speech Resource
+
Simple Chat uses one Speech configuration block for audio transcription, voice input, and voice responses. Update it once and then enable only the speech features you want.
+
+
+
+
Managed Identity Endpoint Requirement
+
Managed identity should use the custom-domain Speech endpoint such as https://my-speech-resource.cognitiveservices.azure.com rather than a generic regional endpoint such as https://eastus.api.cognitive.microsoft.com.
+
+
+
+
Resource ID Builder
+
If the portal makes the Speech Resource ID hard to find, enter the subscription ID, resource group, and Speech resource name in Admin Settings and let the built-in helper generate the ARM path.
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/application/single_app/templates/_video_indexer_info.html b/application/single_app/templates/_video_indexer_info.html
index 904ef900..cd806e30 100644
--- a/application/single_app/templates/_video_indexer_info.html
+++ b/application/single_app/templates/_video_indexer_info.html
@@ -12,12 +12,12 @@
- What is Azure AI Video Indexer? Azure AI Video Indexer is a cloud application, part of Azure Applied AI Services, built on Azure AI services (such as Face, Translator, Speech, and Vision). It enables you to extract insights from your videos using managed identity authentication.
+ What is Azure AI Video Indexer? Azure AI Video Indexer is part of Azure Applied AI Services. In Simple Chat, uploaded videos are processed through the Azure Resource Manager Video Indexer flow using managed identity rather than legacy account API keys.
- Important: This application uses Managed Identity authentication for Video Indexer. API keys are not supported.
+ Important: Simple Chat uses the App Service system-assigned managed identity at runtime. Azure Video Indexer may also ask you to select a user-assigned managed identity when you create the Video Indexer resource for its own storage/media integration. Those are different identities with different jobs.
@@ -27,10 +27,20 @@
Current Configuration
+
+ Cloud / Endpoint Mode:
+ Not configured
+
Video File Support Enabled:No
+
+
+
+ Account Name:
+ Not configured
+
Video Indexer Endpoint: Not configured
@@ -83,12 +93,16 @@
1. Create Resource in Azure Portal
Create a resource group (or select an existing one) and select a Region
Enter an account name in the Resource name field
Connect the account to a Storage account (must be Standard StorageV2)
-
Select or create a User-assigned managed identity
+
If prompted, select or create a User-assigned managed identity for the Video Indexer resource itself
Agree to the terms and conditions and select Review + create
When validation completes, select Create
+
+ Identity note: The user-assigned managed identity selected during Video Indexer resource creation is not the same identity that Simple Chat uses to call Video Indexer. Simple Chat still uses the App Service system-assigned managed identity described below.
+
+
Important: Storage accounts for Video Indexer must be a Standard StorageV2 (general-purpose v2) storage account.
@@ -128,6 +142,10 @@
2. Assign Contributor Role to Video Indexer Resource
+
+ Managed identity only: Do not look for a Video Indexer API key in the Admin Settings page. The current setup depends on the App Service managed identity having Contributor on the Video Indexer resource.
+
+
3. Gather Required Configuration Information
From your Video Indexer resource in the Azure Portal, collect:
@@ -169,10 +187,16 @@
Required Configuration Values
-
API Endpoint
-
Azure Video Indexer API endpoint
+
Cloud / Endpoint Mode
+
Select Azure Public, Azure Government, or Custom Endpoint in Admin Settings
+
Azure Public / Commercial
+
Required
+
+
+
Effective API Endpoint
+
Derived from the selected cloud mode unless you choose Custom Endpoint
https://api.videoindexer.ai
-
Default
+
Derived
Resource Group
@@ -230,12 +254,17 @@
Important Notes
Authentication Method
-
This application uses Managed Identity authentication which provides secure, passwordless access to Azure Video Indexer. Managed Identity must be enabled on your App Service and granted the Contributor role on your Video Indexer resource.
+
Simple Chat uses managed identity authentication for Azure Video Indexer. The App Service system-assigned managed identity must be enabled and granted the Contributor role on your Video Indexer resource.
API Keys Not Supported
-
API key authentication is not supported for Azure Portal-created Video Indexer accounts. If you're using an older account from videoindexer.ai, you'll need to migrate to a managed identity-based ARM account for production use.
+
API key authentication is not used by the current Simple Chat Video Indexer flow. If you still have legacy Video Indexer keys from older account types, keep them out of this configuration path.
+
+
+
+
Cloud Selection
+
The Admin Settings page now lets you choose Azure Public, Azure Government, or Custom Endpoint mode. Keep this aligned with your deployment environment. If the application is running with AZURE_ENVIRONMENT=usgovernment, select Azure Government unless you intentionally override the endpoint.
@@ -245,7 +274,7 @@
Production Requirements
Azure Resource Manager (ARM) Video Indexer account created via Azure Portal
System-Assigned Managed Identity enabled on your App Service
Contributor role assigned to the App Service identity on the Video Indexer resource
+ {% for slug in feature_data.current_release.slugs %}
+ {% assign feature = feature_data.lookup[slug] %}
+ {% include latest_release_card.html feature=feature badge=feature_data.current_release.badge %}
+ {% endfor %}
+
+
+
+ {% for group in feature_data.previous_release_groups %}
+
+
+
+ Archive
+ {{ group.label }}
+ {{ group.description }}
+
+
+
+ v{{ group.release_version }}
+ Show highlights
+
+
+
+
+
+ {% for slug in group.slugs %}
+ {% assign feature = feature_data.lookup[slug] %}
+ {% include latest_release_card.html feature=feature badge=group.badge %}
+ {% endfor %}
+
+
+ {% if group.highlights %}
+
+
Additional highlights from v{{ group.release_version }}
+
+ {% for item in group.highlights %}
+
{{ item }}
+ {% endfor %}
+
+
+ {% endif %}
+
+ {% if group.bug_fixes %}
+
+
Bug fixes kept for reference
+
+ {% for item in group.bug_fixes %}
+
{{ item }}
+ {% endfor %}
+
+
+ {% endif %}
+
+
+ {% endfor %}
+
+
\ No newline at end of file
diff --git a/docs/admin_configuration.md b/docs/admin_configuration.md
index af8a9df1..40a56572 100644
--- a/docs/admin_configuration.md
+++ b/docs/admin_configuration.md
@@ -36,7 +36,7 @@ The walkthrough covers these key configuration areas in order:
6. **Azure AI Search** (Required if workspaces enabled) - Configure search indexing
7. **Document Intelligence** (Required if workspaces enabled) - Configure document processing
8. **Video Support** (Optional, workspace-dependent) - Configure video file processing
-9. **Audio Support** (Optional, workspace-dependent) - Configure audio file processing
+9. **Shared Speech Service** (Optional) - Configure the shared Speech resource used for audio uploads, voice input, and voice responses
10. **Content Safety** (Optional) - Configure content filtering
11. **User Feedback & Archiving** (Optional) - Enable feedback and conversation archiving
12. **Enhanced Features** (Optional) - Enhanced citations and image generation
@@ -131,12 +131,14 @@ Key configuration sections include:
- Support for Direct or APIM routing
- Test connection
- **Multimedia Support** (Video/Audio uploads):
- - **Video Files**: Configure Azure Video Indexer using Managed Identity authentication
- - Resource Group, Subscription ID, Account Name, Location, Account ID
- - API Endpoint, ARM API Version, Timeout
- - **Audio Files**: Configure Speech Service
- - Endpoint, Location/Region, Locale
- - Key/Managed Identity authentication
+ - **Video Files**: Configure Azure Video Indexer with the App Service system-assigned managed identity
+ - Cloud / Endpoint Mode, Resource Group, Subscription ID, Account Name, Location, Account ID
+ - Effective API Endpoint, ARM API Version, Timeout
+ - Video Indexer API keys are not used by the current setup flow
+ - **Shared Speech Service**: One Speech resource powers audio uploads, speech-to-text input, and text-to-speech
+ - Endpoint, Location/Region, Locale, Authentication Type
+ - Key authentication uses the Speech key
+ - Managed identity uses the custom-domain Speech endpoint; voice responses also require the Speech Resource ID
### 7. Agents
- **Agents Configuration**:
@@ -189,7 +191,7 @@ The Admin Settings page supports two navigation layouts:
- **APIM vs Direct**: When using Azure API Management (APIM), you'll need to manually specify model names as automatic model fetching is not available
- **Managed Identity**: When using Managed Identity authentication, ensure your Service Principal has the appropriate roles assigned:
- **Azure OpenAI**: Cognitive Services OpenAI User role
- - **Speech Service**: Cognitive Services Speech Contributor role (requires custom domain name on endpoint)
- - **Video Indexer**: Appropriate Video Indexer roles for your account
+ - **Speech Service**: Start with `Cognitive Services Speech User`; add `Cognitive Services Speech Contributor` if transcription operations still require it. Managed identity also requires the custom-domain Speech endpoint, and text-to-speech needs the Speech Resource ID.
+ - **Video Indexer**: Grant the App Service system-assigned managed identity `Contributor` on the Video Indexer resource. If Azure asks for a user-assigned managed identity during Video Indexer resource creation, that identity is for the Video Indexer resource itself, not for Simple Chat runtime calls.
- **Dependencies**: The walkthrough will alert you if required services aren't configured when you enable dependent features (e.g., workspaces require embeddings, AI Search, and Document Intelligence)
- **Required vs Optional**: The walkthrough clearly indicates which settings are required vs optional based on your configuration choices
\ No newline at end of file
diff --git a/docs/assets/css/main.scss b/docs/assets/css/main.scss
index e77bd72c..740f311c 100644
--- a/docs/assets/css/main.scss
+++ b/docs/assets/css/main.scss
@@ -506,13 +506,11 @@ pre[class*="language-"] {
width: min(36vw, 180px);
}
-.latest-release-hero-image,
.latest-release-card-image {
display: block;
text-decoration: none;
}
-.latest-release-hero-image img,
.latest-release-card-image img,
.latest-release-rich-content img {
border: 1px solid rgba(15, 23, 42, 0.1);
@@ -522,11 +520,6 @@ pre[class*="language-"] {
max-width: 100%;
}
-.latest-release-hero-image img {
- max-height: 360px;
- object-fit: cover;
-}
-
.latest-release-index-content,
.latest-release-feature-content {
margin-top: 1.75rem;
@@ -536,6 +529,69 @@ pre[class*="language-"] {
margin-top: 1.75rem;
}
+.latest-release-thumbnail-trigger {
+ appearance: none;
+ background: transparent;
+ border: 0;
+ cursor: pointer;
+ display: block;
+ max-width: 100%;
+ padding: 0;
+ text-align: left;
+}
+
+.latest-release-thumbnail-trigger:focus-visible {
+ outline: 2px solid #0d6efd;
+ outline-offset: 0.35rem;
+ border-radius: 1rem;
+}
+
+.latest-release-thumbnail-card {
+ display: block;
+}
+
+.latest-release-thumbnail-gallery {
+ display: flex;
+ flex-wrap: wrap;
+ gap: 1rem;
+ max-width: 100%;
+}
+
+.latest-release-thumbnail-gallery .latest-release-thumbnail-trigger {
+ flex: 0 1 240px;
+}
+
+.latest-release-thumbnail-media {
+ background: linear-gradient(135deg, #f8f9fa, #eef2f7);
+ border: 1px solid rgba(0, 0, 0, 0.06);
+ border-radius: 0.85rem;
+ box-shadow: 0 0.7rem 1.6rem rgba(15, 23, 42, 0.08);
+ display: block;
+ max-width: 100%;
+ object-fit: cover;
+ transition: transform 0.2s ease, box-shadow 0.2s ease;
+ width: 240px;
+}
+
+.latest-release-thumbnail-trigger:hover .latest-release-thumbnail-media,
+.latest-release-thumbnail-trigger:focus-visible .latest-release-thumbnail-media {
+ transform: translateY(-2px);
+ box-shadow: 0 0.9rem 1.8rem rgba(15, 23, 42, 0.14);
+}
+
+.latest-release-thumbnail-meta {
+ color: #6c757d;
+ display: block;
+ font-size: 0.9rem;
+ margin-top: 0.55rem;
+}
+
+.latest-release-thumbnail-title {
+ display: block;
+ font-weight: 600;
+ margin-top: 0.55rem;
+}
+
.latest-release-section {
margin-top: 1.5rem;
}
@@ -813,52 +869,54 @@ pre[class*="language-"] {
text-underline-offset: 0.15rem;
}
-.latest-release-gallery-grid {
- display: grid;
- gap: 1rem;
- grid-template-columns: repeat(auto-fit, minmax(280px, 1fr));
+.latest-feature-image-modal .modal-dialog {
+ max-width: min(1100px, calc(100vw - 2rem));
}
-.latest-release-gallery-grid--single {
- grid-template-columns: minmax(0, 1fr);
-}
-
-.latest-release-gallery-card {
- background: rgba(255, 255, 255, 0.98);
- border: 1px solid var(--bs-border-color);
+.latest-feature-image-modal .modal-content {
+ background: linear-gradient(180deg, rgba(25, 33, 52, 0.98), rgba(14, 20, 34, 0.98));
+ border: 1px solid rgba(255, 255, 255, 0.12);
border-radius: 1rem;
- box-shadow: 0 0.8rem 1.8rem rgba(15, 23, 42, 0.05);
+ box-shadow: 0 1.4rem 3rem rgba(0, 0, 0, 0.38);
+ color: #f8f9fa;
overflow: hidden;
}
-.latest-release-gallery-link {
- display: block;
+.latest-feature-image-modal .modal-header {
+ background: rgba(255, 255, 255, 0.03);
+ border-color: rgba(248, 249, 250, 0.12);
}
-.latest-release-gallery-link img {
- aspect-ratio: 16 / 10;
- border: 0;
- border-bottom: 1px solid var(--bs-border-color);
- border-radius: 0;
- box-shadow: none;
- object-fit: cover;
- width: 100%;
+.latest-feature-image-modal .modal-body {
+ background: radial-gradient(circle at top, rgba(96, 165, 250, 0.08), transparent 45%), rgba(12, 18, 30, 0.92);
+ padding: 1rem 1.2rem 1.2rem;
}
-.latest-release-gallery-caption {
- display: flex;
- flex-direction: column;
- gap: 0.35rem;
- padding: 1rem 1.05rem 1.05rem;
+.latest-feature-image-modal-caption {
+ color: rgba(248, 250, 252, 0.9);
+ font-size: 0.98rem;
}
-.latest-release-gallery-caption strong {
- font-size: 1rem;
+.latest-feature-image-modal .btn-close {
+ filter: invert(1) grayscale(100%) brightness(200%);
}
-.latest-release-gallery-caption span {
- color: var(--simplechat-secondary);
- line-height: 1.6;
+.latest-feature-image-frame {
+ background: linear-gradient(180deg, rgba(30, 41, 59, 0.9), rgba(15, 23, 42, 0.88));
+ border: 1px solid rgba(148, 163, 184, 0.24);
+ border-radius: 1rem;
+ box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.05);
+ padding: 0.9rem;
+}
+
+.latest-feature-image-modal img {
+ background: rgba(248, 250, 252, 0.98);
+ border-radius: 0.9rem;
+ box-shadow: 0 0.8rem 2rem rgba(0, 0, 0, 0.24);
+ display: block;
+ max-height: 75vh;
+ object-fit: contain;
+ width: 100%;
}
[data-bs-theme="dark"] .latest-release-hero {
@@ -875,7 +933,6 @@ pre[class*="language-"] {
[data-bs-theme="dark"] .latest-release-card-shell,
[data-bs-theme="dark"] .latest-release-card-icon,
[data-bs-theme="dark"] .latest-release-archive-panel,
-[data-bs-theme="dark"] .latest-release-gallery-card,
[data-bs-theme="dark"] .latest-release-rich-content > h2 + p,
[data-bs-theme="dark"] .latest-release-rich-content > h2 + ul,
[data-bs-theme="dark"] .latest-release-rich-content > h2 + ol {
@@ -900,6 +957,7 @@ pre[class*="language-"] {
[data-bs-theme="dark"] .latest-release-footer-note,
[data-bs-theme="dark"] .latest-release-note-panel ul,
[data-bs-theme="dark"] .latest-release-note-panel p,
+[data-bs-theme="dark"] .latest-release-thumbnail-meta,
[data-bs-theme="dark"] .latest-release-breadcrumb a {
color: rgba(226, 232, 240, 0.82);
}
@@ -911,15 +969,30 @@ pre[class*="language-"] {
border-color: rgba(var(--latest-release-accent-rgb), 0.28);
}
-[data-bs-theme="dark"] .latest-release-hero-image img,
[data-bs-theme="dark"] .latest-release-card-image img,
-[data-bs-theme="dark"] .latest-release-gallery-link img,
[data-bs-theme="dark"] .latest-release-rich-content img {
border-color: rgba(148, 163, 184, 0.2);
}
-[data-bs-theme="dark"] .latest-release-gallery-caption span {
- color: rgba(226, 232, 240, 0.8);
+[data-bs-theme="dark"] .latest-release-thumbnail-media {
+ background: linear-gradient(135deg, rgba(15, 23, 42, 0.92), rgba(30, 41, 59, 0.9));
+ border-color: rgba(96, 165, 250, 0.2);
+}
+
+[data-bs-theme="dark"] .latest-feature-image-modal .modal-content {
+ background: linear-gradient(180deg, rgba(26, 32, 44, 0.99), rgba(10, 14, 24, 0.99));
+ border-color: rgba(148, 163, 184, 0.22);
+ box-shadow: 0 1.5rem 3.2rem rgba(0, 0, 0, 0.55);
+}
+
+[data-bs-theme="dark"] .latest-feature-image-modal .modal-body {
+ background: radial-gradient(circle at top, rgba(59, 130, 246, 0.12), transparent 42%), rgba(8, 12, 20, 0.96);
+}
+
+[data-bs-theme="dark"] .latest-feature-image-frame {
+ background: linear-gradient(180deg, rgba(17, 24, 39, 0.96), rgba(30, 41, 59, 0.92));
+ border-color: rgba(96, 165, 250, 0.18);
+ box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.04), 0 0.75rem 1.8rem rgba(0, 0, 0, 0.28);
}
@media (max-width: 991.98px) {
diff --git a/docs/assets/js/latest-release.js b/docs/assets/js/latest-release.js
new file mode 100644
index 00000000..7923c4f2
--- /dev/null
+++ b/docs/assets/js/latest-release.js
@@ -0,0 +1,43 @@
+// latest-release.js
+
+function setupLatestFeatureImageModal() {
+ const modalElement = document.getElementById('latestFeatureImageModal');
+ const modalImage = document.getElementById('latestFeatureImageModalImage');
+ const modalTitle = document.getElementById('latestFeatureImageModalLabel');
+ const modalCaption = document.getElementById('latestFeatureImageModalCaption');
+ const imageTriggers = document.querySelectorAll('[data-latest-feature-image-src]');
+
+ if (!modalElement || !modalImage || !modalTitle || !modalCaption || imageTriggers.length === 0) {
+ return;
+ }
+
+ const imageModal = bootstrap.Modal.getOrCreateInstance(modalElement);
+
+ imageTriggers.forEach((trigger) => {
+ trigger.addEventListener('click', () => {
+ const imageSrc = trigger.dataset.latestFeatureImageSrc;
+ const imageTitle = trigger.dataset.latestFeatureImageTitle || 'Latest Feature Preview';
+ const imageCaption = trigger.dataset.latestFeatureImageCaption || 'Click outside the popup to close it.';
+ const imageAlt = trigger.querySelector('img')?.getAttribute('alt') || imageTitle;
+
+ if (!imageSrc) {
+ return;
+ }
+
+ modalImage.src = imageSrc;
+ modalImage.alt = imageAlt;
+ modalTitle.textContent = imageTitle;
+ modalCaption.textContent = imageCaption;
+ imageModal.show();
+ });
+ });
+
+ modalElement.addEventListener('hidden.bs.modal', () => {
+ modalImage.src = '';
+ modalImage.alt = 'Latest feature preview';
+ });
+}
+
+document.addEventListener('DOMContentLoaded', () => {
+ setupLatestFeatureImageModal();
+});
\ No newline at end of file
diff --git a/docs/explanation/features/AI_VOICE_CONVERSATIONS_SETUP_GUIDE.md b/docs/explanation/features/AI_VOICE_CONVERSATIONS_SETUP_GUIDE.md
new file mode 100644
index 00000000..155dddaf
--- /dev/null
+++ b/docs/explanation/features/AI_VOICE_CONVERSATIONS_SETUP_GUIDE.md
@@ -0,0 +1,85 @@
+# AI Voice Conversations Setup Guide
+
+Version implemented: **0.241.009**
+Enhanced in version: **0.241.010**
+
+## Overview and Purpose
+
+This feature adds a dedicated Setup Guide modal to the Admin Settings AI Voice Conversations card. The guide gives admins an in-app reference for configuring Azure AI Speech for audio uploads, voice input, and voice responses without leaving the settings page.
+
+Dependencies:
+
+- Azure AI Speech
+- Admin Settings Search & Extract tab
+- Shared Speech configuration fields in the AI Voice Conversations card
+
+## Technical Specifications
+
+### Architecture Overview
+
+The feature follows the same UI pattern already used by the Azure AI Video Indexer setup guide:
+
+- A Setup Guide button is rendered in the AI Voice Conversations card header.
+- A dedicated modal partial contains setup guidance, required field descriptions, and a snapshot of the current form state.
+- Client-side modal update logic reads the current Speech settings just before the modal opens.
+
+### Configuration Options
+
+The guide explains the settings used by the shared Speech configuration section:
+
+- Speech Service Endpoint
+- Speech Service Location
+- Speech Service Locale
+- Authentication Type
+- Speech Service Key
+- Speech Resource ID
+- Speech Subscription ID
+- Speech Resource Group
+- Speech Resource Name
+
+### File Structure
+
+- `application/single_app/templates/admin_settings.html`
+- `application/single_app/templates/_speech_service_info.html`
+- `ui_tests/test_admin_multimedia_guidance.py`
+- `functional_tests/test_multimedia_support_reorganization.py`
+
+## Usage Instructions
+
+### How to Open the Guide
+
+1. Go to Admin Settings.
+2. Open the Search & Extract tab.
+3. Find the AI Voice Conversations card.
+4. Click **Setup Guide**.
+
+### What the Guide Covers
+
+1. The shared-resource model for audio uploads, voice input, and voice responses.
+2. The difference between API key and managed-identity authentication.
+3. A step-by-step custom-domain walkthrough for managed identity: **Networking** → **Firewalls and virtual networks** → **Generate Custom Domain Name** → verify on **Keys and Endpoint**.
+4. Speech-specific RBAC role guidance.
+5. When the Speech Resource ID is required and how the built-in helper fields can construct it.
+
+### Custom-Domain Walkthrough
+
+The guide now walks admins through the exact managed-identity endpoint setup flow in Azure:
+
+1. Open the Speech resource in Azure portal.
+2. Go to **Resource Management** → **Networking**.
+3. Open **Firewalls and virtual networks**.
+4. Choose **Generate Custom Domain Name**.
+5. Save a globally unique custom name.
+6. Verify the resulting endpoint in **Keys and Endpoint** before copying it into Simple Chat.
+
+## Testing and Validation
+
+### Test Coverage
+
+- Functional test coverage verifies that the AI Voice setup guide modal is included in the admin settings template and that the Setup Guide trigger is present.
+- UI test coverage verifies that the modal opens from the Admin Settings page and reflects the current shared Speech configuration values.
+
+### Known Limitations
+
+- The guide is informational. It does not provision Azure resources or assign roles automatically.
+- The UI test requires an authenticated admin Playwright session to run end to end.
\ No newline at end of file
diff --git a/docs/explanation/features/index.md b/docs/explanation/features/index.md
index 0b922a77..800ebdae 100644
--- a/docs/explanation/features/index.md
+++ b/docs/explanation/features/index.md
@@ -9,4 +9,8 @@ category: Version History
## Tutorial Features
- [Guided Tutorials](GUIDED_TUTORIALS.md)
-- [User Tutorial Visibility Preference](USER_TUTORIAL_VISIBILITY_PREFERENCE.md)
\ No newline at end of file
+- [User Tutorial Visibility Preference](USER_TUTORIAL_VISIBILITY_PREFERENCE.md)
+
+## Admin Experience Features
+
+- [AI Voice Conversations Setup Guide](AI_VOICE_CONVERSATIONS_SETUP_GUIDE.md)
\ No newline at end of file
diff --git a/docs/explanation/fixes/GROUP_PROMPTS_ROLE_UI_NULL_GUARD_FIX.md b/docs/explanation/fixes/GROUP_PROMPTS_ROLE_UI_NULL_GUARD_FIX.md
new file mode 100644
index 00000000..f0a8019e
--- /dev/null
+++ b/docs/explanation/fixes/GROUP_PROMPTS_ROLE_UI_NULL_GUARD_FIX.md
@@ -0,0 +1,46 @@
+# Group Prompts Role UI Null Guard Fix
+
+Fixed/Implemented in version: **0.241.003**
+
+## Header Information
+
+Issue description:
+The group workspace page could throw a client-side exception while refreshing active group state: `Cannot read properties of null (reading 'style')` from `updateGroupPromptsRoleUI()`.
+
+Root cause analysis:
+The prompt role-toggle logic assumed `create-group-prompt-section` and `group-prompts-role-warning` always existed in the DOM and wrote directly to `.style.display` without checking for missing nodes.
+
+Version implemented:
+`config.py` was updated to `VERSION = "0.241.003"` for this fix.
+
+## Technical Details
+
+Files modified:
+- `application/single_app/templates/group_workspaces.html`
+- `application/single_app/config.py`
+- `ui_tests/test_group_workspace_prompt_role_ui_resilience.py`
+
+Code changes summary:
+- Switched the prompt role warning and create button containers to Bootstrap `d-none` state classes instead of inline `display: none` styles.
+- Updated `updateGroupPromptsRoleUI()` to tolerate missing prompt DOM nodes and toggle visibility via `classList` instead of unsafe `.style` access.
+- Added an early return in `fetchGroupPrompts()` when the prompt table container is unavailable.
+- Added a UI regression test that removes the prompt role UI containers before changing the active group and asserts that no page error is raised.
+
+Testing approach:
+- Added a Playwright UI regression test covering group changes with missing prompt role containers.
+- Targeted validation should include the new UI test file plus a syntax/error pass on the updated template and config version bump.
+
+Impact analysis:
+The group workspace now keeps loading and switching groups even when prompt role UI fragments are omitted, customized, or temporarily unavailable.
+
+## Validation
+
+Test results:
+The regression test is designed to fail on the old `.style` access and pass once the null-safe toggle logic is present.
+
+Before/after comparison:
+- Before: group changes could throw an uncaught promise error from `updateGroupPromptsRoleUI()`.
+- After: group changes skip missing prompt role nodes safely and continue updating the rest of the workspace.
+
+User experience improvements:
+Users no longer see the prompt role UI exception interrupt the group workspace load flow when those prompt elements are missing.
\ No newline at end of file
diff --git a/docs/explanation/fixes/MEDIA_ENHANCED_CITATION_BADGE_FIX.md b/docs/explanation/fixes/MEDIA_ENHANCED_CITATION_BADGE_FIX.md
new file mode 100644
index 00000000..01879805
--- /dev/null
+++ b/docs/explanation/fixes/MEDIA_ENHANCED_CITATION_BADGE_FIX.md
@@ -0,0 +1,48 @@
+# Media Enhanced Citation Badge Fix
+
+Fixed/Implemented in version: **0.241.007**
+
+## Issue Description
+
+Audio and video files uploaded while Enhanced Citations was enabled were stored in Azure Blob Storage and could open through the enhanced citation experience on the chat page, but the workspace document details panel still showed the citation mode as Standard.
+
+## Root Cause Analysis
+
+The workspace document list renders the citation badge from the persisted `enhanced_citations` field on the document metadata record.
+
+Audio and video processing uploaded originals to blob storage, but the metadata record was not updated to set `enhanced_citations` to `true`.
+
+At the same time, the chat-side enhanced citation metadata endpoint could still infer enhanced support from blob-backed document state, so chat behavior and workspace metadata drifted apart.
+
+## Technical Details
+
+Files modified: `application/single_app/functions_documents.py`, `application/single_app/route_enhanced_citations.py`, `application/single_app/config.py`, `functional_tests/test_media_enhanced_citations_metadata_flag.py`
+
+Code changes summary:
+
+- Added normalization helpers so blob-backed documents read back with `enhanced_citations=True` even when older records are missing that field.
+- Updated `upload_to_blob()` to stamp `enhanced_citations=True` on the stored document metadata for new blob-backed uploads.
+- Initialized new document metadata records with `enhanced_citations=False` so the field is always explicit.
+- Updated the enhanced citation document metadata route to use the normalized per-document flag instead of inferring state from a derived blob path.
+
+Impact analysis:
+
+- Existing audio and video documents that already have persisted blob references now render the Enhanced badge in workspace details without requiring re-upload.
+- New blob-backed uploads keep workspace metadata aligned with the chat enhanced citation experience.
+
+## Validation
+
+Test coverage: `functional_tests/test_media_enhanced_citations_metadata_flag.py`
+
+Test results:
+
+- Validates normalization of current and archived blob-backed documents to `enhanced_citations=True`.
+- Validates that blob uploads stamp the document metadata with the enhanced citation flag.
+- Validates that document list/detail reads and the enhanced citation metadata route use the normalized value.
+
+Before/after comparison:
+
+- Before: Blob-backed media could behave as enhanced in chat while still displaying Standard in workspace details.
+- After: Workspace details and chat enhanced citation behavior use the same normalized document metadata state.
+
+Related config.py version update: `VERSION = "0.241.007"`
\ No newline at end of file
diff --git a/docs/explanation/fixes/TABULAR_EXHAUSTIVE_RESULT_SYNTHESIS_FIX.md b/docs/explanation/fixes/TABULAR_EXHAUSTIVE_RESULT_SYNTHESIS_FIX.md
new file mode 100644
index 00000000..347a8cf6
--- /dev/null
+++ b/docs/explanation/fixes/TABULAR_EXHAUSTIVE_RESULT_SYNTHESIS_FIX.md
@@ -0,0 +1,42 @@
+# Tabular Exhaustive Result Synthesis Fix
+
+Fixed/Implemented in version: **0.241.006**
+
+## Issue Description
+
+For exhaustive tabular questions such as "list out all of the security controls," the tabular analysis workflow could successfully execute an analytical tool call that returned the full matching result set, but the inner synthesis step could still answer as though it only had workbook schema samples.
+
+## Root Cause Analysis
+
+The main tabular retry guardrails in [route_backend_chats.py](application/single_app/route_backend_chats.py) only treated this kind of bad synthesis as retry-worthy in entity-lookup mode. General analytical requests could therefore accept a response that claimed only sample rows or workbook metadata were available even after a successful `query_tabular_data` call had returned the full result set.
+
+## Technical Details
+
+### Files Modified
+
+- `application/single_app/route_backend_chats.py`
+- `functional_tests/test_tabular_exhaustive_result_synthesis_fix.py`
+- `application/single_app/config.py`
+
+### Code Changes Summary
+
+- Expanded the access-limited synthesis detector to catch responses that say the data only includes sample rows, workbook metadata, or not the full list.
+- Added result-coverage helpers that distinguish between full and partial analytical result slices.
+- Reused those coverage signals in the primary tabular analysis loop so successful analytical calls can trigger a retry for general analysis mode, not just entity lookup.
+- Added prompt guidance telling the tabular synthesis model to treat `returned_rows == total_matches` and `returned_values == distinct_count` as full result availability.
+
+### Testing Approach
+
+- Added a regression test covering full-result exhaustive list retries.
+- Added a regression test covering partial-result exhaustive list reruns.
+
+## Validation
+
+### Expected Improvement
+
+- Exhaustive list questions no longer stop at a synthesis response that wrongly claims only schema samples are available after successful analytical tool calls.
+- When only a partial slice is returned, the workflow now has explicit retry guidance to rerun the relevant analytical call with a higher limit before answering.
+
+### Related Version Update
+
+- `application/single_app/config.py` updated to `0.241.006`.
\ No newline at end of file
diff --git a/docs/explanation/fixes/TABULAR_RESULT_COVERAGE_REDUNDANT_COMPARISON_FIX.md b/docs/explanation/fixes/TABULAR_RESULT_COVERAGE_REDUNDANT_COMPARISON_FIX.md
new file mode 100644
index 00000000..4cdf10bc
--- /dev/null
+++ b/docs/explanation/fixes/TABULAR_RESULT_COVERAGE_REDUNDANT_COMPARISON_FIX.md
@@ -0,0 +1,40 @@
+# Tabular Result Coverage Redundant Comparison Fix
+
+Fixed/Implemented in version: **0.241.007**
+
+## Issue Description
+
+The tabular result coverage helper in `application/single_app/route_backend_chats.py` used complementary `elif` comparisons immediately after `>=` checks when determining whether returned rows or distinct values covered the full result set.
+
+## Root Cause Analysis
+
+`parse_tabular_result_count()` normalizes these metadata fields to non-negative integers or `None`, and the helper already guards against `None` before comparing. Under those preconditions, `returned_rows >= total_matches` and `returned_values >= distinct_count` fully partition the remaining numeric cases, so the follow-up `<` tests were redundant and triggered static-analysis noise.
+
+## Technical Details
+
+### Files Modified
+
+- `application/single_app/route_backend_chats.py`
+- `functional_tests/test_tabular_exhaustive_result_synthesis_fix.py`
+- `application/single_app/config.py`
+
+### Code Changes Summary
+
+- Replaced the redundant complementary `elif` comparisons with `else` branches in `get_tabular_result_coverage_summary()`.
+- Added regression coverage for partial distinct-value result slices so the touched branch remains explicitly exercised.
+- Updated the application version to `0.241.007`.
+
+### Testing Approach
+
+- Extended the existing tabular exhaustive-result synthesis functional test to verify partial distinct-value coverage is still detected.
+
+## Validation
+
+### Expected Improvement
+
+- CodeQL no longer reports the redundant comparison finding for the tabular result coverage helper.
+- Runtime behavior remains unchanged for valid parsed numeric counts.
+
+### Related Version Update
+
+- `application/single_app/config.py` updated to `0.241.007`.
\ No newline at end of file
diff --git a/docs/explanation/fixes/v0.241.007/SPEECH_VIDEO_INDEXER_GUIDANCE_FIX.md b/docs/explanation/fixes/v0.241.007/SPEECH_VIDEO_INDEXER_GUIDANCE_FIX.md
new file mode 100644
index 00000000..16ad4f07
--- /dev/null
+++ b/docs/explanation/fixes/v0.241.007/SPEECH_VIDEO_INDEXER_GUIDANCE_FIX.md
@@ -0,0 +1,75 @@
+# Speech And Video Indexer Guidance Fix
+
+Fixed in version: **0.241.007**
+
+## Overview
+
+This fix aligns the admin multimedia setup experience with the way Simple Chat actually authenticates to Azure Speech Service and Azure AI Video Indexer.
+
+The update removes stale Video Indexer API-key guidance, adds cloud-aware Video Indexer endpoint selection, and documents the additional Speech Resource ID required for managed-identity text-to-speech.
+
+## Issue Description
+
+Users were encountering conflicting instructions across the admin UI and documentation:
+
+- Video Indexer walkthrough steps still implied an API-key-style setup even though the runtime uses ARM plus managed identity.
+- Video Indexer guidance mixed the managed identity used by the Video Indexer resource itself with the App Service managed identity used by Simple Chat.
+- The shared Speech section implied that all Speech features used the same minimal managed-identity inputs, but text-to-speech also needs the Speech Resource ID.
+
+## Root Cause
+
+The UI, backend, and written guidance drifted over time.
+
+- The admin walkthrough still referenced legacy Video Indexer fields.
+- The admin JavaScript only revealed the Speech configuration when audio uploads were enabled, not when speech-to-text input or text-to-speech were enabled.
+- Text-to-speech still used a key-centric backend path while other Speech flows already supported managed identity.
+
+## Technical Details
+
+### Files Modified
+
+- `application/single_app/config.py`
+- `application/single_app/functions_authentication.py`
+- `application/single_app/functions_documents.py`
+- `application/single_app/functions_settings.py`
+- `application/single_app/route_backend_tts.py`
+- `application/single_app/route_frontend_admin_settings.py`
+- `application/single_app/static/js/admin/admin_settings.js`
+- `application/single_app/templates/_video_indexer_info.html`
+- `application/single_app/templates/admin_settings.html`
+- `docs/admin_configuration.md`
+- `docs/reference/admin_configuration.md`
+- `docs/how-to/azure_speech_managed_identity_manul_setup.md`
+- `docs/setup_instructions_special.md`
+- `functional_tests/test_multimedia_support_reorganization.py`
+- `functional_tests/test_video_indexer_dual_authentication_support.py`
+- `ui_tests/test_admin_multimedia_guidance.py`
+
+### Code Changes Summary
+
+- Added `speech_service_resource_id` to admin settings and persistence.
+- Added a shared Speech synthesis configuration helper so text-to-speech can use managed identity correctly.
+- Updated the admin multimedia walkthrough to use Video Indexer cloud selection and ARM resource fields instead of a legacy API-key path.
+- Updated admin JavaScript so the shared Speech section appears when any Speech feature is enabled.
+- Added Video Indexer cloud selection, effective endpoint display, and clearer identity guidance in the admin UI and help modal.
+
+### Testing Approach
+
+- Updated source-inspection functional tests for the current multimedia UI.
+- Updated the legacy Video Indexer functional test to validate the managed-identity-only flow.
+- Added a Playwright admin UI regression test for the Video Indexer cloud selector and shared Speech managed-identity fields.
+
+## Validation
+
+### Before
+
+- Users could still find walkthrough and documentation references to Video Indexer API keys.
+- The Speech settings panel could stay hidden when only speech-to-text input or text-to-speech was enabled.
+- Managed-identity text-to-speech lacked the required Speech Resource ID guidance and backend support.
+
+### After
+
+- The admin UI consistently points users to managed identity for Video Indexer.
+- Video Indexer cloud selection and endpoint behavior are explicit.
+- Shared Speech guidance now explains the extra managed-identity requirement for voice responses.
+- Functional and UI regression coverage now checks the updated configuration path.
\ No newline at end of file
diff --git a/docs/explanation/fixes/v0.241.008/SPEECH_RESOURCE_ID_BUILDER_FIX.md b/docs/explanation/fixes/v0.241.008/SPEECH_RESOURCE_ID_BUILDER_FIX.md
new file mode 100644
index 00000000..f7e8bef5
--- /dev/null
+++ b/docs/explanation/fixes/v0.241.008/SPEECH_RESOURCE_ID_BUILDER_FIX.md
@@ -0,0 +1,48 @@
+# Speech Resource ID Builder Fix
+
+Fixed in version: **0.241.008**
+
+## Overview
+
+This fix makes the managed-identity Speech configuration easier to complete by helping admins build the Azure Resource Manager Speech resource ID directly in the Admin Settings page.
+
+## Issue Description
+
+The Speech Resource ID is required for managed-identity text-to-speech, but it is not easy to locate in the Azure portal. Users had to either find the value manually in the resource properties page or use Azure CLI commands outside the app.
+
+## Root Cause
+
+The admin UI only exposed a single raw Speech Resource ID field. It did not provide any assistance for constructing the ARM path even though the required parts are predictable.
+
+## Technical Details
+
+### Files Modified
+
+- `application/single_app/config.py`
+- `application/single_app/functions_settings.py`
+- `application/single_app/route_frontend_admin_settings.py`
+- `application/single_app/static/js/admin/admin_settings.js`
+- `application/single_app/templates/admin_settings.html`
+- `application/external_apps/databaseseeder/artifacts/admin_settings.json`
+- `docs/how-to/azure_speech_managed_identity_manul_setup.md`
+- `functional_tests/test_multimedia_support_reorganization.py`
+- `ui_tests/test_admin_multimedia_guidance.py`
+
+### Code Changes Summary
+
+- Added optional helper fields for Speech Subscription ID, Speech Resource Group, and Speech Resource Name.
+- Added client-side builder logic that assembles the Speech ARM resource ID in the format `/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.CognitiveServices/accounts/<speech-resource-name>`.
+- Added endpoint-based resource-name inference for common custom-domain Speech endpoints.
+- Persisted the helper fields in admin settings and the seeded sample admin settings artifact.
+
+## Validation
+
+### Before
+
+- Admins had to find or type the full Speech Resource ID manually.
+- The portal location for the Resource ID was easy to miss.
+
+### After
+
+- Admins can provide the subscription ID, resource group, and Speech resource name and let the UI build the full Resource ID.
+- The built value is still editable, so manual overrides remain possible.
\ No newline at end of file
diff --git a/docs/explanation/release_notes.md b/docs/explanation/release_notes.md
index 1092c4c8..da34cbad 100644
--- a/docs/explanation/release_notes.md
+++ b/docs/explanation/release_notes.md
@@ -4,6 +4,44 @@ This page tracks notable Simple Chat releases and organizes the detailed change
For feature-focused and fix-focused drill-downs by version, see [Features by Version](/explanation/features/) and [Fixes by Version](/explanation/fixes/).
+### **(v0.241.006)**
+
+#### Bug Fixes
+
+* **Speech and Video Indexer Setup Guidance Alignment**
+ * Fixed stale admin guidance around Azure AI Video Indexer and shared Azure Speech configuration so managed-identity setup no longer points admins toward legacy Video Indexer API keys or incomplete Speech instructions.
+ * The admin experience now reflects the shared Speech resource model, adds Speech Resource ID helper fields, and keeps managed-identity voice-response requirements aligned with runtime behavior.
+ * (Ref: `admin_settings.html`, `admin_settings.js`, `route_backend_tts.py`, `functions_documents.py`, shared Speech and Video Indexer guidance)
+
+* **Agent Output Token Defaults and Foundry Limit Enforcement**
+ * Fixed stale agent output-token defaults so new and normalized agents now use `-1` to defer to the provider or model default instead of silently reintroducing older fixed caps.
+ * Azure AI Foundry agent execution now also honors saved output-token settings in both classic Foundry agent runs and new Foundry Responses-based runs, so configured limits are enforced consistently instead of only being stored in agent configuration.
+ * (Ref: `functions_global_agents.py`, `agent.schema.json`, `foundry_agent_runtime.py`, `test_foundry_token_limit_defaults.py`)
+
+* **Tabular Exhaustive Result Synthesis Retry**
+ * Fixed exhaustive tabular questions such as "list all" requests so the workflow no longer stops at an answer that claims only sample rows or workbook metadata are available after analytical tool calls already returned the full matching result set.
+ * General tabular analysis now detects full versus partial result coverage from tool metadata, retries incomplete synthesis when necessary, and adds stronger prompt guidance so the final answer uses the returned analytical results directly.
+ * (Ref: `route_backend_chats.py`, `test_tabular_exhaustive_result_synthesis_fix.py`, `TABULAR_EXHAUSTIVE_RESULT_SYNTHESIS_FIX.md`)
+
+* **Group Workspace Documents and Prompts Load Recovery**
+ * Fixed a Group Workspace page-load regression where active-group initialization could fail on a missing prompt-role UI container and stop the rest of the page from rendering correctly.
+ * Group document and prompt content now continue loading even if the prompt permission banner or create-button container is unavailable during startup, preventing blank content areas caused by a JavaScript null-reference error.
+ * Added functional and UI regression coverage for the guarded prompt-role path so future changes do not reintroduce the same startup failure.
+ * (Ref: `group_workspaces.html`, `test_group_workspace_prompt_role_ui_guard.py`, `test_group_workspace_prompt_role_containers_ui.py`)
+
+* **Audio and Video Enhanced Citation Badge Consistency**
+ * Fixed blob-backed audio and video documents showing Standard citations in workspace details even when Enhanced Citations was enabled and the same files already opened through the enhanced citation experience on the chat page.
+ * Document metadata now persists and normalizes the `enhanced_citations` flag from blob-backed storage state so existing media uploads and new uploads both render the correct Enhanced badge across workspace and chat flows.
+ * Added regression coverage and fix documentation for the metadata normalization path.
+ * (Ref: `functions_documents.py`, `route_enhanced_citations.py`, `test_media_enhanced_citations_metadata_flag.py`, `MEDIA_ENHANCED_CITATION_BADGE_FIX.md`)
+
+#### User Interface Enhancements
+
+* **AI Voice Conversations Setup Guide**
+ * Added an in-app Setup Guide modal to the AI Voice Conversations admin card so admins can configure Azure Speech without leaving Admin Settings.
+ * The guide includes a live snapshot of the current Speech configuration, explains key versus managed-identity authentication, and now walks admins through enabling the required custom domain in Azure portal before verifying the endpoint on Keys and Endpoint.
+ * (Ref: `admin_settings.html`, `_speech_service_info.html`, `azure_speech_managed_identity_manul_setup.md`, `test_admin_multimedia_guidance.py`)
+
### **(v0.241.002)**
#### Bug Fixes
diff --git a/docs/how-to/azure_speech_managed_identity_manul_setup.md b/docs/how-to/azure_speech_managed_identity_manul_setup.md
index bf1b6e74..f991e02a 100644
--- a/docs/how-to/azure_speech_managed_identity_manul_setup.md
+++ b/docs/how-to/azure_speech_managed_identity_manul_setup.md
@@ -2,7 +2,7 @@
## Overview
-This guide explains the critical difference between key-based and managed identity authentication when configuring Azure Speech Service, and the required steps to enable managed identity properly.
+This guide explains the critical difference between key-based and managed identity authentication when configuring Azure Speech Service, and the required steps to enable managed identity properly. In Simple Chat, the same Speech configuration is shared across audio uploads, speech-to-text chat input, and text-to-speech voice responses.
## Authentication Methods: Regional vs. Resource-Specific Endpoints
@@ -74,6 +74,8 @@ Headers:
+3. If your App Service MI has `Cognitive Services Speech User` role → authorized
4. The request proceeds to your dedicated Speech resource instance
+For some transcription operations, you may also need `Cognitive Services Speech Contributor`. Start with `Speech User`, then add `Speech Contributor` if transcription still fails after endpoint and identity configuration are correct.
+
---
## Required Setup for Managed Identity
@@ -84,13 +86,32 @@ Headers:
2. **System-assigned or user-assigned managed identity** on your App Service
3. **RBAC role assignments** on the Speech resource
-### Step 1: Enable Custom Subdomain on Speech Resource
+### Step 1: Turn On the Custom Domain on the Speech Resource
**Why needed**: By default, Speech resources use the regional endpoint and do NOT have custom subdomains. Managed identity requires the resource-specific endpoint.
-**How to enable**:
+#### Azure portal walkthrough
+
+1. Go to the Azure portal and open your **Azure AI Speech** resource.
+2. In the left pane under **Resource Management**, select **Networking**.
+3. Open the **Firewalls and virtual networks** tab.
+4. Select **Generate Custom Domain Name**.
+5. Enter a globally unique custom domain name. The final endpoint will look like `https://<your-custom-domain>.cognitiveservices.azure.com`.
+6. Select **Save**.
+7. After the update finishes, open **Keys and Endpoint** and confirm the resource endpoint now starts with `https://<your-custom-domain>.cognitiveservices.azure.com`.
+
+**Important notes**:
+- Custom subdomain name must be **globally unique** across Azure
+- Usually use the same name as your resource: `<speech-resource-name>`
+- **One-way operation**: Cannot be disabled once enabled
+- Microsoft Learn recommends trying the change on a test resource first if the production Speech resource already has many Speech Studio models or projects
+
+#### Azure CLI alternative
+
+If you prefer CLI instead of the portal:
```bash
+az account set --subscription <subscription-id>
az cognitiveservices account update \
--name \
--resource-group \
@@ -100,19 +121,22 @@ az cognitiveservices account update \
**Example**:
```bash
+az account set --subscription <subscription-id>
az cognitiveservices account update \
--name simplechat6-dev-speech \
--resource-group sc-simplechat6-dev-rg \
--custom-domain simplechat6-dev-speech
```
-**Important notes**:
-- Custom subdomain name must be **globally unique** across Azure
-- Usually use the same name as your resource: ``
-- **One-way operation**: Cannot be disabled once enabled
-- After enabling, the resource's endpoint property changes from regional to resource-specific
+#### Verify the custom domain is enabled
+
+Portal verification:
+
+1. Open the Speech resource.
+2. Go to **Keys and Endpoint**.
+3. Confirm the endpoint now starts with `https://<your-custom-domain>.cognitiveservices.azure.com` instead of `https://<region>.api.cognitive.microsoft.com`.
-**Verify custom subdomain is enabled**:
+CLI verification:
```bash
az cognitiveservices account show \
@@ -146,13 +170,13 @@ MI_PRINCIPAL_ID=$(az webapp identity show \
--resource-group \
--query principalId -o tsv)
-# Assign Cognitive Services Speech User role (data-plane read access)
+# Assign Cognitive Services Speech User role (baseline data-plane access)
az role assignment create \
--assignee $MI_PRINCIPAL_ID \
--role "Cognitive Services Speech User" \
--scope $SPEECH_RESOURCE_ID
-# Assign Cognitive Services Speech Contributor role (if needed for write operations)
+# Assign Cognitive Services Speech Contributor role (if transcription operations still require it)
az role assignment create \
--assignee $MI_PRINCIPAL_ID \
--role "Cognitive Services Speech Contributor" \
@@ -172,19 +196,30 @@ az role assignment list \
In the Admin Settings β Search & Extract β Multimedia Support section:
+- Use the **Setup Guide** button on the **AI Voice Conversations** card if you want an in-app walkthrough while filling the Speech fields.
+
| Setting | Value | Example |
|---------|-------|---------|
| **Enable Audio File Support** | β Checked | |
+| **Enable Speech-to-Text Input** | Optional | |
+| **Enable Text-to-Speech** | Optional | |
| **Speech Service Endpoint** | Resource-specific endpoint (with custom subdomain) | `https://simplechat6-dev-speech.cognitiveservices.azure.com` |
| **Speech Service Location** | Azure region | `eastus2` |
| **Speech Service Locale** | Language locale for transcription | `en-US` |
| **Authentication Type** | Managed Identity | |
+| **Speech Subscription ID** | Optional helper for building the ARM resource ID in the Admin UI | `12345678-1234-1234-1234-123456789abc` |
+| **Speech Resource Group** | Optional helper for building the ARM resource ID in the Admin UI | `rg-speech-prod` |
+| **Speech Resource Name** | Optional helper for building the ARM resource ID in the Admin UI | `my-speech-resource` |
| **Speech Service Key** | (Leave empty when using MI) | |
+| **Speech Resource ID** | Required when using managed identity for text-to-speech | `/subscriptions/<subscription-id>/resourceGroups/<resource-group>/providers/Microsoft.CognitiveServices/accounts/<speech-resource-name>` |
**Critical**:
- Endpoint must be the resource-specific URL (custom subdomain)
- Do NOT use the regional endpoint for managed identity
+- If you have not created the custom domain yet, use the Azure portal walkthrough in Step 1 before saving the Speech endpoint in Admin Settings
- Remove trailing slash from endpoint: β `https://..azure.com` β `https://..azure.com/`
+- If text-to-speech is enabled with managed identity, set the full Speech Resource ID in Admin Settings
+- If you do not know the full resource ID, the Admin Settings page can build it from Subscription ID, Resource Group, and Speech Resource Name
### Step 4: Test Audio Upload
@@ -228,6 +263,14 @@ In the Admin Settings β Search & Extract β Multimedia Support section:
**Solution**: Assign required roles using Step 2 above
+### Error: Text-to-speech fails with MI but transcription works
+
+**Symptom**: Audio uploads or speech-to-text input succeed, but `/api/chat/tts` fails when authentication type is Managed Identity.
+
+**Cause**: Text-to-speech managed identity also requires the Speech Resource ID in addition to the custom-domain endpoint and region.
+
+**Solution**: Populate **Speech Resource ID** in Admin Settings and verify the App Service managed identity has the required RBAC role(s).
+
### Key auth works but MI fails
**Diagnosis checklist**:
diff --git a/docs/latest-release/index.md b/docs/latest-release/index.md
index a6b1f253..0f55a08a 100644
--- a/docs/latest-release/index.md
+++ b/docs/latest-release/index.md
@@ -5,74 +5,4 @@ description: "Current feature guides with previous release highlights kept for r
section: "Latest Release"
---
-{% assign feature_data = site.data.latest_release_features %}
-
-
-
-
-
Current release
-
{{ feature_data.current_release.label }}
-
{{ feature_data.current_release.description }}
-
- {{ feature_data.current_release.badge }}
-
-
-
- {% for slug in feature_data.current_release.slugs %}
- {% assign feature = feature_data.lookup[slug] %}
- {% include latest_release_card.html feature=feature badge=feature_data.current_release.badge %}
- {% endfor %}
-
-
-
-{% for group in feature_data.previous_release_groups %}
-
-
-
- Archive
- {{ group.label }}
- {{ group.description }}
-
-
-
- v{{ group.release_version }}
- Show highlights
-
-
-
-
-
- {% for slug in group.slugs %}
- {% assign feature = feature_data.lookup[slug] %}
- {% include latest_release_card.html feature=feature badge=group.badge %}
- {% endfor %}
-
-
- {% if group.highlights %}
-
-
Additional highlights from v{{ group.release_version }}
-
- {% for item in group.highlights %}
-
{{ item }}
- {% endfor %}
-
-
- {% endif %}
-
- {% if group.bug_fixes %}
-
-
Bug fixes kept for reference
-
- {% for item in group.bug_fixes %}
-
{{ item }}
- {% endfor %}
-
-
- {% endif %}
-
-
-{% endfor %}
-
-
+This page mirrors the curated in-app Latest Features experience and keeps earlier release highlights available in an archive section below the current release.
diff --git a/docs/reference/admin_configuration.md b/docs/reference/admin_configuration.md
index 61916c82..0a622b6a 100644
--- a/docs/reference/admin_configuration.md
+++ b/docs/reference/admin_configuration.md
@@ -112,17 +112,23 @@ Control document workspace features and capabilities.
#### Multimedia Support
**Video Processing (Azure Video Indexer)**
- **Enable/Disable**: Toggle video file support
+- **Cloud / Endpoint Mode**: Choose Azure Public, Azure Government, or Custom Endpoint
+- **Resource Group**: Azure resource group containing the Video Indexer resource
+- **Subscription ID**: Azure subscription GUID for the Video Indexer resource
+- **Account Name**: Video Indexer resource name
- **Account ID**: Video Indexer account identifier
- **Location**: Geographic location of Video Indexer account
-- **API Key**: Authentication key for Video Indexer
-- **API Endpoint**: Video Indexer service endpoint
+- **API Endpoint**: Derived from the selected cloud mode unless Custom Endpoint is selected
+- **Authentication Model**: App Service system-assigned managed identity with `Contributor` on the Video Indexer resource
- **Timeout Settings**: Processing timeout limits
**Audio Processing (Azure Speech Service)**
-- **Enable/Disable**: Toggle audio file support
+- **Shared Service**: The same Speech configuration is used for audio uploads, speech-to-text input, and text-to-speech
- **Endpoint**: Speech service endpoint URL
- **Region**: Azure region for Speech service
-- **API Key**: Authentication key for Speech service
+- **Locale**: Default transcription locale
+- **Authentication Type**: Key or Managed Identity
+- **Speech Resource ID**: Required for managed-identity text-to-speech
#### Metadata Extraction
- **Enable/Disable**: Toggle AI-powered metadata extraction
@@ -312,6 +318,8 @@ Guidelines:
- β Enable Content Safety for production deployments
- β Implement proper RBAC role assignments
- β Monitor access to admin settings
+- β Use managed identity, not API keys, for Azure Video Indexer in the current Simple Chat setup
+- β Use the Speech custom-domain endpoint for managed identity and add the Speech Resource ID when enabling managed-identity voice responses
### Performance
- β Test all service connections after configuration
diff --git a/docs/setup_instructions_special.md b/docs/setup_instructions_special.md
index 9c3f7ca0..08332aed 100644
--- a/docs/setup_instructions_special.md
+++ b/docs/setup_instructions_special.md
@@ -80,14 +80,15 @@ Using Managed Identity allows the App Service to authenticate to other Azure res
| Document Intelligence | Cognitive Services User | Allows using the DI service for analysis. |
| Content Safety | Azure AI Developer | Allows using the CS service for analysis. (Role name might vary slightly, check portal) |
| Azure Storage Account | Storage Blob Data Contributor | Required for Enhanced Citations if using Managed Identity. Allows reading/writing blobs. |
- | Azure Speech Service | Cognitive Services Speech Contributor | Allows using the Speech service for transcription. |
- | Video Indexer | (Handled via VI resource settings) | VI typically uses its own Managed Identity to access associated Storage/Media Services. Check VI docs. |
+ | Azure Speech Service | Cognitive Services Speech User (plus Speech Contributor if needed) | Use the custom-domain endpoint for managed identity. Add `Cognitive Services Speech Contributor` if transcription operations still require it. Managed-identity text-to-speech also needs the Speech Resource ID in Admin Settings. |
+ | Video Indexer | Contributor on the Video Indexer resource | Grant the App Service system-assigned managed identity `Contributor` on the Video Indexer resource. Video Indexer resource creation may also ask for a separate user-assigned managed identity for its own storage/media integration. |
3. **Configure Application to Use Managed Identity**:
- Update the **Application settings** in the App Service (or .env before upload) **OR** use the toggles in the **Admin Settings UI** where available.
- **Cosmos DB**: Set AZURE_COSMOS_AUTHENTICATION_TYPE="managed_identity" in Application Settings. Remove AZURE_COSMOS_KEY and AZURE_COSMOS_CONNECTION_STRING.
- **Other Services (OpenAI, Search, DI, CS, Storage)**: Check the **Admin Settings UI** first. Most sections (GPT, Embeddings, Image Gen, Citations, Safety, Search & Extract) have toggles or dropdowns to select "Managed Identity" as the authentication method. Using the UI toggle is preferred as it handles the backend configuration. If UI options aren't present or for overrides, you might need specific environment variables like AZURE_OPENAI_USE_MANAGED_IDENTITY="True", but rely on the UI where possible.
+ - **Speech and Video Indexer**: In Search & Extract β Multimedia Support, the Speech section is shared by audio uploads, speech-to-text input, and text-to-speech. Video Indexer uses the App Service managed identity and the cloud/end-point selector rather than a Video Indexer API key.
## Enterprise Networking
> Return to top
diff --git a/functional_tests/test_foundry_token_limit_defaults.py b/functional_tests/test_foundry_token_limit_defaults.py
new file mode 100644
index 00000000..f96aff50
--- /dev/null
+++ b/functional_tests/test_foundry_token_limit_defaults.py
@@ -0,0 +1,286 @@
+# test_foundry_token_limit_defaults.py
+#!/usr/bin/env python3
+"""
+Functional test for Foundry token limit defaults and runtime forwarding.
+Version: 0.241.005
+Implemented in: 0.241.005
+
+This test ensures seeded agent defaults use model-native output limits and
+that classic and new Foundry runtimes forward configured token caps.
+"""
+
+import asyncio
+import importlib
+import sys
+import types
+from pathlib import Path
+
+
+ROOT = Path(__file__).resolve().parents[1]
+SINGLE_APP_ROOT = ROOT / "application" / "single_app"
+
+sys.path.insert(0, str(SINGLE_APP_ROOT))
+sys.path.insert(0, str(ROOT))
+
+
+def assert_contains(file_path: Path, expected: str) -> None:
+ content = file_path.read_text(encoding="utf-8")
+ if expected not in content:
+ raise AssertionError(f"Expected to find {expected!r} in {file_path}")
+
+
+def restore_modules(original_modules):
+ for module_name, original_module in original_modules.items():
+ if original_module is None:
+ sys.modules.pop(module_name, None)
+ else:
+ sys.modules[module_name] = original_module
+
+
+def load_foundry_agent_runtime_module():
+ functions_appinsights_stub = types.ModuleType("functions_appinsights")
+ functions_appinsights_stub.log_event = lambda *args, **kwargs: None
+
+ functions_debug_stub = types.ModuleType("functions_debug")
+ functions_debug_stub.debug_print = lambda *args, **kwargs: None
+
+ functions_keyvault_stub = types.ModuleType("functions_keyvault")
+ functions_keyvault_stub.retrieve_secret_from_key_vault_by_full_name = lambda value: value
+ functions_keyvault_stub.validate_secret_name_dynamic = lambda value: False
+
+ requests_stub = types.ModuleType("requests")
+ requests_stub.last_post_args = None
+ requests_stub.last_post_kwargs = None
+
+ class StubResponse:
+ def __init__(self, payload, status_code=200, headers=None, text=""):
+ self._payload = payload
+ self.status_code = status_code
+ self.headers = headers or {"Content-Type": "application/json"}
+ self.text = text
+
+ def json(self):
+ return self._payload
+
+ def close(self):
+ return None
+
+ def post(*args, **kwargs):
+ requests_stub.last_post_args = args
+ requests_stub.last_post_kwargs = kwargs
+ return StubResponse(
+ {
+ "id": "resp-123",
+ "model": "gpt-5.4",
+ "output": [
+ {
+ "type": "message",
+ "content": [
+ {
+ "type": "output_text",
+ "text": "new foundry result",
+ }
+ ],
+ }
+ ],
+ }
+ )
+
+ requests_stub.Response = StubResponse
+ requests_stub.get = lambda *args, **kwargs: None
+ requests_stub.post = post
+
+ azure_stub = types.ModuleType("azure")
+ azure_identity_stub = types.ModuleType("azure.identity")
+ azure_identity_aio_stub = types.ModuleType("azure.identity.aio")
+
+ class Token:
+ def __init__(self, value):
+ self.token = value
+
+ class SyncDefaultAzureCredential:
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+ def get_token(self, scope):
+ return Token(f"sync:{scope}")
+
+ def close(self):
+ return None
+
+ class SyncClientSecretCredential(SyncDefaultAzureCredential):
+ pass
+
+ class AsyncDefaultAzureCredential:
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+ async def get_token(self, scope):
+ return Token(f"async:{scope}")
+
+ async def close(self):
+ return None
+
+ class AsyncClientSecretCredential(AsyncDefaultAzureCredential):
+ pass
+
+ class AzureAuthorityHosts:
+ AZURE_PUBLIC_CLOUD = "public"
+ AZURE_GOVERNMENT = "government"
+
+ azure_identity_stub.AzureAuthorityHosts = AzureAuthorityHosts
+ azure_identity_stub.ClientSecretCredential = SyncClientSecretCredential
+ azure_identity_stub.DefaultAzureCredential = SyncDefaultAzureCredential
+ azure_identity_aio_stub.ClientSecretCredential = AsyncClientSecretCredential
+ azure_identity_aio_stub.DefaultAzureCredential = AsyncDefaultAzureCredential
+
+ semantic_kernel_stub = types.ModuleType("semantic_kernel")
+ semantic_kernel_agents_stub = types.ModuleType("semantic_kernel.agents")
+ semantic_kernel_contents_stub = types.ModuleType("semantic_kernel.contents")
+ semantic_kernel_chat_stub = types.ModuleType("semantic_kernel.contents.chat_message_content")
+
+ class StubAgentsOperations:
+ async def get_agent(self, agent_id):
+ return types.SimpleNamespace(model={"id": "gpt-5.4"}, agent_id=agent_id)
+
+ async def delete_thread(self, thread_id):
+ return None
+
+ class StubClient:
+ def __init__(self):
+ self.agents = StubAgentsOperations()
+
+ async def close(self):
+ return None
+
+ async def _delete_thread():
+ return None
+
+ class ChatMessageContent:
+ def __init__(self, content="", role="user", metadata=None):
+ self.content = content
+ self.role = role
+ self.metadata = metadata or {}
+ self.items = []
+
+ class AzureAIAgent:
+ last_invoke_kwargs = None
+
+ def __init__(self, client=None, definition=None):
+ self.client = client
+ self.definition = definition
+
+ @staticmethod
+ def create_client(*args, **kwargs):
+ return StubClient()
+
+ async def invoke(self, **kwargs):
+ AzureAIAgent.last_invoke_kwargs = kwargs
+ message = ChatMessageContent(content="classic foundry result", metadata={})
+ thread = types.SimpleNamespace(id="thread-123", delete=_delete_thread)
+ yield types.SimpleNamespace(thread=thread, message=message)
+
+ semantic_kernel_agents_stub.AzureAIAgent = AzureAIAgent
+ semantic_kernel_chat_stub.ChatMessageContent = ChatMessageContent
+
+ original_modules = {}
+ module_stubs = {
+ "functions_appinsights": functions_appinsights_stub,
+ "functions_debug": functions_debug_stub,
+ "functions_keyvault": functions_keyvault_stub,
+ "requests": requests_stub,
+ "azure": azure_stub,
+ "azure.identity": azure_identity_stub,
+ "azure.identity.aio": azure_identity_aio_stub,
+ "semantic_kernel": semantic_kernel_stub,
+ "semantic_kernel.agents": semantic_kernel_agents_stub,
+ "semantic_kernel.contents": semantic_kernel_contents_stub,
+ "semantic_kernel.contents.chat_message_content": semantic_kernel_chat_stub,
+ }
+
+ for module_name, module_stub in module_stubs.items():
+ original_modules[module_name] = sys.modules.get(module_name)
+ sys.modules[module_name] = module_stub
+
+ original_modules["foundry_agent_runtime"] = sys.modules.get("foundry_agent_runtime")
+ sys.modules.pop("foundry_agent_runtime", None)
+ module = importlib.import_module("foundry_agent_runtime")
+ return module, original_modules, requests_stub, AzureAIAgent
+
+
+def test_foundry_defaults_and_runtime_forwarding():
+ """Seeded defaults should use -1 and Foundry runtimes should forward token caps."""
+    print("🔍 Testing Foundry defaults and runtime token forwarding...")
+
+ globals_path = ROOT / "application" / "single_app" / "functions_global_agents.py"
+ schema_path = ROOT / "application" / "single_app" / "static" / "json" / "schemas" / "agent.schema.json"
+
+ assert_contains(globals_path, '"max_completion_tokens": -1')
+ assert_contains(schema_path, '"default": -1')
+
+ module, original_modules, requests_stub, azure_ai_agent_cls = load_foundry_agent_runtime_module()
+
+ try:
+ message_history = [module.ChatMessageContent(content="Hello Foundry")]
+
+ classic_result = asyncio.run(
+ module.execute_foundry_agent(
+ foundry_settings={
+ "agent_id": "agent-123",
+ "endpoint": "https://example.services.ai.azure.com",
+ },
+ global_settings={},
+ message_history=message_history,
+ metadata={"conversation_id": "conv-1"},
+ max_completion_tokens=4096,
+ )
+ )
+
+ assert azure_ai_agent_cls.last_invoke_kwargs is not None
+ assert azure_ai_agent_cls.last_invoke_kwargs.get("max_completion_tokens") == 4096
+ assert classic_result.message == "classic foundry result"
+
+ payload_without_limit = module._build_new_foundry_request_payload(
+ message_history,
+ {"conversation_id": "conv-1"},
+ stream=False,
+ )
+ assert "max_output_tokens" not in payload_without_limit
+
+ new_result = asyncio.run(
+ module.execute_new_foundry_agent(
+ foundry_settings={
+ "application_name": "test-app",
+ "endpoint": "https://example.services.ai.azure.com",
+ "responses_api_version": "2025-11-15-preview",
+ },
+ global_settings={},
+ message_history=message_history,
+ metadata={"conversation_id": "conv-2"},
+ max_completion_tokens=8192,
+ )
+ )
+
+ assert requests_stub.last_post_kwargs is not None
+ assert requests_stub.last_post_kwargs["json"].get("max_output_tokens") == 8192
+ assert new_result.message == "new foundry result"
+ finally:
+ restore_modules(original_modules)
+
+    print("✅ Foundry defaults and runtime token forwarding verified.")
+
+
+if __name__ == "__main__":
+ success = True
+ try:
+ test_foundry_defaults_and_runtime_forwarding()
+ except Exception as exc:
+        print(f"❌ Test failed: {exc}")
+ import traceback
+
+ traceback.print_exc()
+ success = False
+
+ raise SystemExit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_group_workspace_prompt_role_ui_guard.py b/functional_tests/test_group_workspace_prompt_role_ui_guard.py
new file mode 100644
index 00000000..6741319c
--- /dev/null
+++ b/functional_tests/test_group_workspace_prompt_role_ui_guard.py
@@ -0,0 +1,87 @@
+# test_group_workspace_prompt_role_ui_guard.py
+"""
+Functional test for group workspace prompt role UI guard.
+Version: 0.241.007
+Implemented in: 0.241.007
+
+This test ensures that the group workspace prompt role UI safely handles
+missing prompt containers so active-group loading can continue.
+"""
+
+import os
+import sys
+
+
+ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+GROUP_WORKSPACE_TEMPLATE = os.path.join(
+ ROOT_DIR,
+ "application",
+ "single_app",
+ "templates",
+ "group_workspaces.html",
+)
+CONFIG_FILE = os.path.join(
+ ROOT_DIR,
+ "application",
+ "single_app",
+ "config.py",
+)
+
+
+def read_file(path):
+ with open(path, "r", encoding="utf-8") as file_handle:
+ return file_handle.read()
+
+
+def test_group_workspace_prompt_role_ui_uses_guarded_dom_access():
+ """Verify prompt role UI updates tolerate missing containers."""
+ print("Testing group workspace prompt role UI guard...")
+
+ content = read_file(GROUP_WORKSPACE_TEMPLATE)
+
+ required_snippets = [
+ 'const createGroupPromptSection = document.getElementById(',
+ '"create-group-prompt-section"',
+ 'const groupPromptsRoleWarning = document.getElementById(',
+ '"group-prompts-role-warning"',
+ 'if (!createGroupPromptSection || !groupPromptsRoleWarning) {',
+ 'createGroupPromptSection.classList.toggle("d-none", !canManage);',
+ 'groupPromptsRoleWarning.classList.toggle("d-none", canManage);',
+ ]
+ missing = [snippet for snippet in required_snippets if snippet not in content]
+ assert not missing, f"Missing guarded prompt role UI snippets: {missing}"
+
+ forbidden_snippets = [
+ 'document.getElementById("create-group-prompt-section").style.display',
+ 'document.getElementById("group-prompts-role-warning").style.display',
+ ]
+ present = [snippet for snippet in forbidden_snippets if snippet in content]
+ assert not present, f"Unexpected direct prompt role UI DOM access found: {present}"
+
+ print("Prompt role UI guard is present")
+
+
+def test_config_version_is_bumped_for_prompt_role_ui_guard_fix():
+ """Verify config version was bumped for the prompt role UI guard fix."""
+ print("Testing config version bump...")
+
+ config_content = read_file(CONFIG_FILE)
+ assert 'VERSION = "0.241.007"' in config_content, "Expected config.py version 0.241.007"
+
+ print("Config version bump passed")
+
+
+if __name__ == "__main__":
+ tests = [
+ test_group_workspace_prompt_role_ui_uses_guarded_dom_access,
+ test_config_version_is_bumped_for_prompt_role_ui_guard_fix,
+ ]
+
+ results = []
+ for test in tests:
+ print(f"\nRunning {test.__name__}...")
+ results.append(test())
+
+ success = all(results)
+ print(f"\nResults: {sum(results)}/{len(results)} tests passed")
+ sys.exit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_media_enhanced_citations_metadata_flag.py b/functional_tests/test_media_enhanced_citations_metadata_flag.py
new file mode 100644
index 00000000..bf57a829
--- /dev/null
+++ b/functional_tests/test_media_enhanced_citations_metadata_flag.py
@@ -0,0 +1,153 @@
+#!/usr/bin/env python3
+# test_media_enhanced_citations_metadata_flag.py
+"""
+Functional test for media enhanced citation metadata normalization.
+Version: 0.241.007
+Implemented in: 0.241.007
+
+This test ensures blob-backed audio and video documents are marked as
+enhanced citations in stored metadata so workspace badges match chat behavior.
+"""
+
+import ast
+import os
+import re
+import sys
+
+
+ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+SINGLE_APP_ROOT = os.path.join(ROOT_DIR, 'application', 'single_app')
+FUNCTIONS_DOCUMENTS_FILE = os.path.join(SINGLE_APP_ROOT, 'functions_documents.py')
+ROUTE_FILE = os.path.join(SINGLE_APP_ROOT, 'route_enhanced_citations.py')
+CONFIG_FILE = os.path.join(SINGLE_APP_ROOT, 'config.py')
+
+
+def read_file(path):
+ with open(path, 'r', encoding='utf-8') as file_handle:
+ return file_handle.read()
+
+
+def load_normalization_helpers():
+ """Compile the normalization helpers directly from source for focused validation."""
+ source = read_file(FUNCTIONS_DOCUMENTS_FILE)
+ module_ast = ast.parse(source, filename=FUNCTIONS_DOCUMENTS_FILE)
+
+ helper_names = {
+ '_has_persisted_blob_reference',
+ '_normalize_document_enhanced_citations',
+ }
+ helper_nodes = [
+ node for node in module_ast.body
+ if isinstance(node, ast.FunctionDef) and node.name in helper_names
+ ]
+
+ missing_helpers = helper_names.difference({node.name for node in helper_nodes})
+ assert not missing_helpers, f'Missing normalization helpers: {sorted(missing_helpers)}'
+
+ isolated_module = ast.Module(body=helper_nodes, type_ignores=[])
+ namespace = {'ARCHIVED_REVISION_BLOB_PATH_MODE': 'archived_revision'}
+ exec(compile(isolated_module, FUNCTIONS_DOCUMENTS_FILE, 'exec'), namespace)
+ return namespace['_normalize_document_enhanced_citations']
+
+
+def test_blob_backed_documents_normalize_to_enhanced():
+ """Verify legacy and current blob-backed documents normalize to enhanced citations."""
+    print('🔍 Testing blob-backed document normalization...')
+
+ normalize_document = load_normalization_helpers()
+
+ current_blob_doc = {'id': 'audio-doc', 'blob_path': 'user/audio.mp3'}
+ normalized_current = normalize_document(dict(current_blob_doc))
+ assert normalized_current['enhanced_citations'] is True, 'Current blob path should normalize to enhanced citations'
+
+ archived_blob_doc = {
+ 'id': 'video-doc',
+ 'blob_path': None,
+ 'blob_path_mode': 'archived_revision',
+ 'archived_blob_path': 'user/family/video.mp4',
+ }
+ normalized_archived = normalize_document(dict(archived_blob_doc))
+ assert normalized_archived['enhanced_citations'] is True, 'Archived blob path should normalize to enhanced citations'
+
+ text_only_doc = {'id': 'text-doc', 'blob_path': None, 'archived_blob_path': None}
+ normalized_text = normalize_document(dict(text_only_doc))
+ assert normalized_text['enhanced_citations'] is False, 'Documents without persisted blob references should stay standard'
+
+    print('✅ Blob-backed document normalization passed')
+ return True
+
+
+def test_blob_upload_persists_enhanced_flag():
+ """Verify uploads stamp the document metadata with enhanced_citations=True."""
+    print('🔍 Testing blob upload metadata stamping...')
+
+ source = read_file(FUNCTIONS_DOCUMENTS_FILE)
+ required_snippets = [
+ 'current_document["enhanced_citations"] = True',
+ '"enhanced_citations": False,',
+ ]
+
+ missing = [snippet for snippet in required_snippets if snippet not in source]
+ assert not missing, f'Missing upload/create metadata snippets: {missing}'
+
+    print('✅ Blob upload metadata stamping passed')
+ return True
+
+
+def test_document_reads_use_normalized_enhanced_flag():
+ """Verify document list/detail reads expose normalized enhanced citation state."""
+ print('π Testing document read normalization and enhanced citation metadata route...')
+
+ documents_source = read_file(FUNCTIONS_DOCUMENTS_FILE)
+ route_source = read_file(ROUTE_FILE)
+
+ required_document_snippets = [
+ '_normalize_document_enhanced_citations(_choose_current_document(family_documents))',
+ 'return jsonify(_normalize_document_enhanced_citations(document_results[0])), 200',
+ 'return _normalize_document_enhanced_citations(document_items[0]) if document_items else None',
+ ]
+ missing_document_snippets = [
+ snippet for snippet in required_document_snippets if snippet not in documents_source
+ ]
+ assert not missing_document_snippets, (
+ 'Missing document normalization snippets: '
+ f'{missing_document_snippets}'
+ )
+
+ route_snippet = '"enhanced_citations": bool(raw_doc.get("enhanced_citations", False))'
+ assert route_snippet in route_source, 'Enhanced citation metadata route should use normalized per-document flag'
+ assert 'bool(blob_path)' not in route_source, 'Metadata route should no longer infer enhanced citations from a derived blob path'
+
+    print('✅ Document read normalization passed')
+ return True
+
+
+def test_config_version_bumped_for_media_citation_fix():
+ """Verify config.py version was bumped for this fix."""
+    print('🔍 Testing config version bump...')
+
+ config_source = read_file(CONFIG_FILE)
+ version_match = re.search(r'VERSION = "([0-9.]+)"', config_source)
+ assert version_match, 'Could not find VERSION in config.py'
+ assert version_match.group(1) == '0.241.007', 'Expected config.py version 0.241.007'
+
+    print('✅ Config version bump passed')
+ return True
+
+
+if __name__ == '__main__':
+ tests = [
+ test_blob_backed_documents_normalize_to_enhanced,
+ test_blob_upload_persists_enhanced_flag,
+ test_document_reads_use_normalized_enhanced_flag,
+ test_config_version_bumped_for_media_citation_fix,
+ ]
+
+ results = []
+ for test in tests:
+        print(f'\n🧪 Running {test.__name__}...')
+ results.append(test())
+
+ success = all(results)
+    print(f'\n📊 Results: {sum(results)}/{len(results)} tests passed')
+ sys.exit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_multimedia_support_reorganization.py b/functional_tests/test_multimedia_support_reorganization.py
index 5afbfc72..23abfef5 100644
--- a/functional_tests/test_multimedia_support_reorganization.py
+++ b/functional_tests/test_multimedia_support_reorganization.py
@@ -1,235 +1,290 @@
#!/usr/bin/env python3
+# test_multimedia_support_reorganization.py
"""
-Functional test for multimedia support reorganization and Video Indexer configuration modal.
-Version: 0.229.017
-Implemented in: 0.229.017
+Functional test for multimedia support reorganization and shared speech guidance.
+Version: 0.241.010
+Implemented in: 0.241.010
This test ensures that:
-1. Multimedia Support section has been moved from Other tab to Search and Extract tab
-2. Video Indexer configuration modal is properly integrated
-3. All multimedia settings are accessible in the new location
+1. Multimedia Support remains in the Search and Extract tab
+2. The Video Indexer modal reflects the managed-identity-only ARM setup
+3. The AI Voice setup guide is integrated with the shared Speech settings
+4. Shared Speech and Video Indexer settings are accessible in the current admin UI
"""
-import sys
import os
+import sys
+
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
def test_multimedia_support_move():
- """Test that multimedia support has been moved to Search and Extract tab."""
-    print("🔍 Testing Multimedia Support section move...")
-
+ """Test that multimedia support remains in the Search and Extract tab."""
+    print("🔍 Testing Multimedia Support section location...")
+
try:
- # Read the admin_settings.html file
admin_settings_path = os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ os.path.dirname(os.path.abspath(__file__)),
'..', 'application', 'single_app', 'templates', 'admin_settings.html'
)
-
- with open(admin_settings_path, 'r', encoding='utf-8') as f:
- content = f.read()
-
- # Check that multimedia support is in search-extract tab
+
+ with open(admin_settings_path, 'r', encoding='utf-8') as file_handle:
+ content = file_handle.read()
+
search_extract_section = content.find('id="search-extract" role="tabpanel"')
-        multimedia_support_section = content.find('Multimedia Support')
-
+ multimedia_support_section = content.find('id="video-intelligence-section"')
+
if search_extract_section == -1:
print("❌ Search and Extract tab not found")
return False
-
+
if multimedia_support_section == -1:
print("❌ Multimedia Support section not found")
return False
-
- # Check that multimedia support appears after the search-extract tab
+
if multimedia_support_section < search_extract_section:
-            print("❌ Multimedia Support section not in Search and Extract tab")
+            print("❌ Multimedia Support section is not within Search and Extract")
return False
-
- # Find the end of search-extract tab
-        search_extract_end = content.find('', content.find('id="other" role="tabpanel"'))
-
- if multimedia_support_section > search_extract_end:
-            print("❌ Multimedia Support section appears to be outside Search and Extract tab")
- return False
-
-        print("✅ Multimedia Support section successfully moved to Search and Extract tab")
+
+        print("✅ Multimedia Support section is in Search and Extract")
return True
-
- except Exception as e:
-        print(f"❌ Test failed: {e}")
+
+ except Exception as exc:
+        print(f"❌ Test failed: {exc}")
import traceback
traceback.print_exc()
return False
+
def test_video_indexer_modal():
"""Test that Video Indexer configuration modal is properly integrated."""
print("🔍 Testing Video Indexer configuration modal...")
-
+
try:
- # Check that the modal template file exists
modal_path = os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ os.path.dirname(os.path.abspath(__file__)),
'..', 'application', 'single_app', 'templates', '_video_indexer_info.html'
)
-
+
if not os.path.exists(modal_path):
print("❌ Video Indexer modal template file not found")
return False
-
- # Read the modal template
- with open(modal_path, 'r', encoding='utf-8') as f:
- modal_content = f.read()
-
- # Check for essential modal components
+
+ with open(modal_path, 'r', encoding='utf-8') as file_handle:
+ modal_content = file_handle.read()
+
required_elements = [
'id="videoIndexerInfoModal"',
'Azure AI Video Indexer Configuration Guide',
- 'Create Azure AI Video Indexer Account',
- 'Get API Keys and Configuration',
- 'Configuration Values Reference',
+ 'Cloud / Endpoint Mode',
+ 'App Service system-assigned managed identity',
+ 'Contributor role',
'updateVideoIndexerModalInfo()'
]
-
+
for element in required_elements:
if element not in modal_content:
print(f"❌ Missing modal element: {element}")
return False
-
- # Check that admin_settings.html includes the modal
+
admin_settings_path = os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ os.path.dirname(os.path.abspath(__file__)),
'..', 'application', 'single_app', 'templates', 'admin_settings.html'
)
-
- with open(admin_settings_path, 'r', encoding='utf-8') as f:
- admin_content = f.read()
-
- if "_video_indexer_info.html" not in admin_content:
+
+ with open(admin_settings_path, 'r', encoding='utf-8') as file_handle:
+ admin_content = file_handle.read()
+
+ if '_video_indexer_info.html' not in admin_content:
print("❌ Video Indexer modal not included in admin_settings.html")
return False
-
- # Check for the modal trigger button
+
if 'data-bs-target="#videoIndexerInfoModal"' not in admin_content:
print("❌ Video Indexer modal trigger button not found")
return False
-
+
print("✅ Video Indexer configuration modal properly integrated")
return True
-
- except Exception as e:
-        print(f"❌ Test failed: {e}")
+
+ except Exception as exc:
+        print(f"❌ Test failed: {exc}")
import traceback
traceback.print_exc()
return False
+
+def test_speech_service_modal():
+ """Test that the AI Voice configuration modal is properly integrated."""
+    print("🔍 Testing AI Voice configuration modal...")
+
+ try:
+ modal_path = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ '..', 'application', 'single_app', 'templates', '_speech_service_info.html'
+ )
+
+ if not os.path.exists(modal_path):
+            print("❌ Speech Service modal template file not found")
+ return False
+
+ with open(modal_path, 'r', encoding='utf-8') as file_handle:
+ modal_content = file_handle.read()
+
+ required_elements = [
+ 'id="speechServiceInfoModal"',
+ 'Azure AI Voice Conversations Configuration Guide',
+ 'Cognitive Services Speech User',
+ 'custom-domain Speech endpoint',
+ 'Generate Custom Domain Name',
+ 'Keys and Endpoint',
+ 'updateSpeechServiceModalInfo()'
+ ]
+
+ for element in required_elements:
+ if element not in modal_content:
+                print(f"❌ Missing Speech modal element: {element}")
+ return False
+
+ admin_settings_path = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ '..', 'application', 'single_app', 'templates', 'admin_settings.html'
+ )
+
+ with open(admin_settings_path, 'r', encoding='utf-8') as file_handle:
+ admin_content = file_handle.read()
+
+ if '_speech_service_info.html' not in admin_content:
+            print("❌ Speech Service modal not included in admin_settings.html")
+ return False
+
+ if 'data-bs-target="#speechServiceInfoModal"' not in admin_content:
+            print("❌ Speech Service modal trigger button not found")
+ return False
+
+        print("✅ AI Voice configuration modal properly integrated")
+ return True
+
+ except Exception as exc:
+        print(f"❌ Test failed: {exc}")
+ import traceback
+ traceback.print_exc()
+ return False
+
+
def test_multimedia_settings_preserved():
"""Test that all multimedia settings are preserved in the new location."""
print("🔍 Testing multimedia settings preservation...")
-
+
try:
- # Read the admin_settings.html file
admin_settings_path = os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ os.path.dirname(os.path.abspath(__file__)),
'..', 'application', 'single_app', 'templates', 'admin_settings.html'
)
-
- with open(admin_settings_path, 'r', encoding='utf-8') as f:
- content = f.read()
-
- # Check for video file support settings
+
+ with open(admin_settings_path, 'r', encoding='utf-8') as file_handle:
+ content = file_handle.read()
+
video_settings = [
'id="enable_video_file_support"',
+ 'id="video_indexer_cloud"',
'id="video_indexer_endpoint"',
+ 'id="video_indexer_endpoint_display"',
+ 'id="video_indexer_custom_endpoint"',
'id="video_indexer_account_id"',
- 'id="video_indexer_api_key"',
'id="video_indexer_location"',
'id="video_indexer_resource_group"',
'id="video_indexer_subscription_id"',
'id="video_indexer_account_name"',
'id="video_index_timeout"'
]
-
+
for setting in video_settings:
if setting not in content:
print(f"❌ Missing video setting: {setting}")
return False
-
- # Check for audio file support settings
+
audio_settings = [
'id="enable_audio_file_support"',
+ 'id="enable_speech_to_text_input"',
+ 'id="enable_text_to_speech"',
'id="speech_service_endpoint"',
'id="speech_service_location"',
+ 'id="speech_service_subscription_id"',
+ 'id="speech_service_resource_group"',
+ 'id="speech_service_resource_name"',
'id="speech_service_locale"',
+ 'id="speech_service_authentication_type"',
+ 'id="speech_service_resource_id"',
'id="speech_service_key"'
]
-
+
for setting in audio_settings:
if setting not in content:
print(f"❌ Missing audio setting: {setting}")
return False
-
- # Check for Enhanced Citations reference
- if 'Enhanced Citations' not in content:
-            print("❌ Enhanced Citations reference not found")
+
+ if 'video_indexer_api_key' in content:
+            print("❌ Legacy Video Indexer API key field should not be present")
return False
-
-        print("✅ All multimedia settings preserved in new location")
+
+        print("✅ All multimedia settings preserved in current location")
return True
-
- except Exception as e:
-        print(f"❌ Test failed: {e}")
+
+ except Exception as exc:
+        print(f"❌ Test failed: {exc}")
import traceback
traceback.print_exc()
return False
+
def test_version_update():
"""Test that the version has been updated in config.py."""
print("🔍 Testing version update...")
-
+
try:
- # Read the config.py file
config_path = os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
+ os.path.dirname(os.path.abspath(__file__)),
'..', 'application', 'single_app', 'config.py'
)
-
- with open(config_path, 'r', encoding='utf-8') as f:
- content = f.read()
-
- # Check for version update
- if 'VERSION = "0.229.017"' not in content:
-            print("❌ Version not updated to 0.229.017")
+
+ with open(config_path, 'r', encoding='utf-8') as file_handle:
+ content = file_handle.read()
+
+ if 'VERSION = "0.241.010"' not in content:
+            print("❌ Version not updated to 0.241.010")
return False
-
-        print("✅ Version successfully updated to 0.229.017")
+
+        print("✅ Version successfully updated to 0.241.010")
return True
-
- except Exception as e:
-        print(f"❌ Test failed: {e}")
+
+ except Exception as exc:
+        print(f"❌ Test failed: {exc}")
import traceback
traceback.print_exc()
return False
+
if __name__ == "__main__":
tests = [
test_multimedia_support_move,
test_video_indexer_modal,
+ test_speech_service_modal,
test_multimedia_settings_preserved,
- test_version_update
+ test_version_update,
]
-
+
results = []
-
+
for test in tests:
print(f"\nπ§ͺ Running {test.__name__}...")
results.append(test())
-
+
success = all(results)
print(f"\nπ Results: {sum(results)}/{len(results)} tests passed")
-
+
if success:
- print("β All tests passed! Multimedia support successfully moved to Search and Extract tab with Video Indexer configuration modal.")
+ print("β All tests passed! Multimedia support guidance matches the current shared Speech and Video Indexer configuration.")
else:
print("β Some tests failed. Please review the changes.")
-
+
sys.exit(0 if success else 1)
diff --git a/functional_tests/test_tabular_entity_lookup_mode.py b/functional_tests/test_tabular_entity_lookup_mode.py
index 990513fb..d453dabe 100644
--- a/functional_tests/test_tabular_entity_lookup_mode.py
+++ b/functional_tests/test_tabular_entity_lookup_mode.py
@@ -205,7 +205,7 @@ def test_entity_lookup_primary_sheet_hint_prefers_anchor_entity_sheet():
assert likely_sheet == 'Taxpayers', likely_sheet
assert relevant_sheets[0] == 'Taxpayers', relevant_sheets
assert relevant_sheets.index('Taxpayers') < relevant_sheets.index('Notices'), relevant_sheets
- assert 'begin with filter_rows or query_tabular_data without sheet_name so the plugin can perform a cross-sheet discovery search' in route_content, route_content
+ assert 'begin with search_rows, filter_rows, or query_tabular_data without sheet_name so the plugin can perform a cross-sheet discovery search' in route_content, route_content
assert 'Do not start with aggregate_column, group_by_aggregate, or group_by_datetime_component until you have located the relevant entity rows.' in route_content, route_content
print('β Entity-lookup primary worksheet hinting passed')
diff --git a/functional_tests/test_tabular_exhaustive_result_synthesis_fix.py b/functional_tests/test_tabular_exhaustive_result_synthesis_fix.py
new file mode 100644
index 00000000..e976f120
--- /dev/null
+++ b/functional_tests/test_tabular_exhaustive_result_synthesis_fix.py
@@ -0,0 +1,238 @@
+#!/usr/bin/env python3
+# test_tabular_exhaustive_result_synthesis_fix.py
+"""
+Functional test for tabular exhaustive-result synthesis retry.
+Version: 0.241.007
+Implemented in: 0.241.006
+
+This test ensures exhaustive tabular requests trigger a retry when successful
+analytical tool calls have already returned either the full matching result
+set or only a partial row or distinct-value slice, yet the synthesis response
+still behaves as though only schema samples are available.
+"""
+
+import ast
+import json
+import os
+import sys
+from types import SimpleNamespace
+
+
+ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.append(ROOT_DIR)
+sys.path.append(os.path.join(ROOT_DIR, 'application', 'single_app'))
+
+ROUTE_FILE = os.path.join(ROOT_DIR, 'application', 'single_app', 'route_backend_chats.py')
+TARGET_FUNCTIONS = {
+ 'question_requests_tabular_exhaustive_results',
+ 'parse_tabular_result_count',
+ 'get_tabular_invocation_result_payload',
+ 'is_tabular_access_limited_analysis',
+ 'get_tabular_result_coverage_summary',
+ 'build_tabular_success_execution_gap_messages',
+}
+
+
+def load_helpers():
+ """Load the targeted tabular retry helpers from the route source."""
+ with open(ROUTE_FILE, 'r', encoding='utf-8') as file_handle:
+ route_content = file_handle.read()
+
+ parsed = ast.parse(route_content, filename=ROUTE_FILE)
+ selected_nodes = []
+ for node in parsed.body:
+ if isinstance(node, ast.FunctionDef) and node.name in TARGET_FUNCTIONS:
+ selected_nodes.append(node)
+
+ module = ast.Module(body=selected_nodes, type_ignores=[])
+ namespace = {
+ 'json': json,
+ 're': __import__('re'),
+ }
+ exec(compile(module, ROUTE_FILE, 'exec'), namespace)
+ return namespace, route_content
+
+
+def test_exhaustive_tabular_retry_detects_full_result_access_gap():
+ """Verify full-result tool coverage forces a retry when synthesis claims sample-only access."""
+ print('π Testing exhaustive tabular retry for full-result access gaps...')
+
+ try:
+ helpers, route_content = load_helpers()
+ wants_exhaustive_results = helpers['question_requests_tabular_exhaustive_results']
+ is_access_limited_analysis = helpers['is_tabular_access_limited_analysis']
+ get_tabular_result_coverage_summary = helpers['get_tabular_result_coverage_summary']
+ build_execution_gap_messages = helpers['build_tabular_success_execution_gap_messages']
+
+ user_question = 'list out all of the security controls'
+ access_limited_analysis = (
+ 'The workbook contains 1,189 controls and control enhancements in NIST SP 800-53 Rev. 5, '
+ 'but the data provided here does not include the full 1,189-item list, only sample rows '
+ 'and workbook metadata. So I cannot accurately list all of them from the current evidence.'
+ )
+ invocations = [
+ SimpleNamespace(
+ function_name='query_tabular_data',
+ parameters={
+ 'filename': 'sp800-53r5-control-catalog.xlsx',
+ 'max_rows': '1189',
+ 'query_expression': '`Control Identifier` == `Control Identifier`',
+ },
+ result=json.dumps({
+ 'filename': 'sp800-53r5-control-catalog.xlsx',
+ 'selected_sheet': 'SP 800-53 Revision 5',
+ 'total_matches': 1189,
+ 'returned_rows': 1189,
+ 'data': [
+ {
+ 'Control Identifier': 'AC-1',
+ 'Control (or Control Enhancement) Name': 'Policy and Procedures',
+ },
+ {
+ 'Control Identifier': 'AC-2',
+ 'Control (or Control Enhancement) Name': 'Account Management',
+ },
+ ],
+ }),
+ error_message=None,
+ )
+ ]
+
+ coverage_summary = get_tabular_result_coverage_summary(invocations)
+ execution_gap_messages = build_execution_gap_messages(
+ user_question,
+ access_limited_analysis,
+ invocations,
+ )
+
+ assert wants_exhaustive_results(user_question), user_question
+ assert is_access_limited_analysis(access_limited_analysis), access_limited_analysis
+ assert coverage_summary['has_full_result_coverage'] is True, coverage_summary
+ assert coverage_summary['has_partial_result_coverage'] is False, coverage_summary
+ assert any('full matching result set' in message for message in execution_gap_messages), execution_gap_messages
+ assert any('list the full results the user asked for' in message for message in execution_gap_messages), execution_gap_messages
+ assert 'Do not claim that only sample rows or workbook metadata are available in that case.' in route_content, route_content
+
+ print('β Exhaustive tabular retry for full-result access gaps passed')
+ return True
+
+ except Exception as exc:
+ print(f'β Test failed: {exc}')
+ import traceback
+ traceback.print_exc()
+ return False
+
+
+def test_exhaustive_tabular_retry_detects_partial_result_slice():
+    """Verify exhaustive requests trigger a rerun when analytical tools returned only a partial slice."""
+ print('π Testing exhaustive tabular retry for partial result slices...')
+
+ try:
+ helpers, _ = load_helpers()
+ get_tabular_result_coverage_summary = helpers['get_tabular_result_coverage_summary']
+ build_execution_gap_messages = helpers['build_tabular_success_execution_gap_messages']
+
+ user_question = 'show me all of the matching security controls'
+ invocations = [
+ SimpleNamespace(
+ function_name='query_tabular_data',
+ parameters={
+ 'filename': 'sp800-53r5-control-catalog.xlsx',
+ 'max_rows': '100',
+ 'query_expression': '`Control Identifier` == `Control Identifier`',
+ },
+ result=json.dumps({
+ 'filename': 'sp800-53r5-control-catalog.xlsx',
+ 'selected_sheet': 'SP 800-53 Revision 5',
+ 'total_matches': 1189,
+ 'returned_rows': 100,
+ 'data': [
+ {
+ 'Control Identifier': 'AC-1',
+ 'Control (or Control Enhancement) Name': 'Policy and Procedures',
+ }
+ ],
+ }),
+ error_message=None,
+ )
+ ]
+
+ coverage_summary = get_tabular_result_coverage_summary(invocations)
+ execution_gap_messages = build_execution_gap_messages(
+ user_question,
+ 'Here is a representative sample of the matching controls.',
+ invocations,
+ )
+
+ assert coverage_summary['has_full_result_coverage'] is False, coverage_summary
+ assert coverage_summary['has_partial_result_coverage'] is True, coverage_summary
+ assert any('higher max_rows or max_values' in message for message in execution_gap_messages), execution_gap_messages
+
+ print('β Exhaustive tabular retry for partial result slices passed')
+ return True
+
+ except Exception as exc:
+ print(f'β Test failed: {exc}')
+ import traceback
+ traceback.print_exc()
+ return False
+
+
+def test_result_coverage_summary_marks_partial_distinct_value_slices():
+ """Verify distinct-value counts below the available total mark partial coverage."""
+ print('π Testing tabular result coverage summary for partial distinct-value slices...')
+
+ try:
+ helpers, _ = load_helpers()
+ get_tabular_result_coverage_summary = helpers['get_tabular_result_coverage_summary']
+
+ invocations = [
+ SimpleNamespace(
+ function_name='get_distinct_tabular_values',
+ parameters={
+ 'filename': 'sp800-53r5-control-catalog.xlsx',
+ 'column': 'Control Identifier',
+ 'max_values': '25',
+ },
+ result=json.dumps({
+ 'filename': 'sp800-53r5-control-catalog.xlsx',
+ 'selected_sheet': 'SP 800-53 Revision 5',
+ 'column': 'Control Identifier',
+ 'distinct_count': 1189,
+ 'returned_values': 25,
+ 'values': ['AC-1', 'AC-2'],
+ }),
+ error_message=None,
+ )
+ ]
+
+ coverage_summary = get_tabular_result_coverage_summary(invocations)
+
+ assert coverage_summary['has_full_result_coverage'] is False, coverage_summary
+ assert coverage_summary['has_partial_result_coverage'] is True, coverage_summary
+
+ print('β Tabular result coverage summary marks partial distinct-value slices')
+ return True
+
+ except Exception as exc:
+ print(f'β Test failed: {exc}')
+ import traceback
+ traceback.print_exc()
+ return False
+
+
+if __name__ == '__main__':
+ tests = [
+ test_exhaustive_tabular_retry_detects_full_result_access_gap,
+ test_exhaustive_tabular_retry_detects_partial_result_slice,
+ test_result_coverage_summary_marks_partial_distinct_value_slices,
+ ]
+ results = []
+
+ for test in tests:
+ print(f'\nπ§ͺ Running {test.__name__}...')
+ results.append(test())
+
+ success = all(results)
+ print(f'\nπ Results: {sum(results)}/{len(results)} tests passed')
+ sys.exit(0 if success else 1)
\ No newline at end of file
diff --git a/functional_tests/test_video_indexer_dual_authentication_support.py b/functional_tests/test_video_indexer_dual_authentication_support.py
index ad75ab5b..a7d2264d 100644
--- a/functional_tests/test_video_indexer_dual_authentication_support.py
+++ b/functional_tests/test_video_indexer_dual_authentication_support.py
@@ -1,249 +1,237 @@
#!/usr/bin/env python3
+# test_video_indexer_dual_authentication_support.py
"""
-Functional test for Video Indexer dual authentication support.
-Version: 0.229.064
-Implemented in: 0.229.064
+Functional test for current Video Indexer managed-identity guidance.
+Version: 0.241.007
+Implemented in: 0.241.007
-This test ensures that the video indexer supports both API key and managed identity
-authentication methods, with proper UI controls and backend logic.
+This legacy-named test now ensures the admin and backend flows reflect the
+current managed-identity-only Video Indexer configuration.
"""
-import sys
import os
+import sys
+
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'application', 'single_app'))
-def test_video_indexer_authentication_settings():
- """Test that video indexer authentication type setting is properly configured in default settings."""
- print("π Testing video indexer authentication settings...")
-
+
+def test_video_indexer_settings_shape():
+ """Test that current Video Indexer settings keep ARM fields and omit legacy auth toggles."""
+ print("π Testing current Video Indexer settings shape...")
+
try:
- # Check the functions_settings.py file directly for default value
- settings_file_path = os.path.join(
- 'application', 'single_app', 'functions_settings.py'
- )
-
+ settings_file_path = os.path.join('application', 'single_app', 'functions_settings.py')
+
if not os.path.exists(settings_file_path):
print("β functions_settings.py not found")
return False
-
- with open(settings_file_path, 'r', encoding='utf-8') as f:
- settings_content = f.read()
-
- # Check if video_indexer_authentication_type is defined
- if 'video_indexer_authentication_type' not in settings_content:
- print("β video_indexer_authentication_type not found in default settings file")
- return False
-
- # Check that default value is managed_identity
- if "'video_indexer_authentication_type': 'managed_identity'" not in settings_content:
- print("β Expected default authentication type to be 'managed_identity' in functions_settings.py")
+
+ with open(settings_file_path, 'r', encoding='utf-8') as file_handle:
+ settings_content = file_handle.read()
+
+ required_settings = [
+ "'video_indexer_endpoint':",
+ "'video_indexer_resource_group':",
+ "'video_indexer_subscription_id':",
+ "'video_indexer_account_name':",
+ "'video_indexer_account_id':",
+ "'video_indexer_location':",
+ ]
+
+ for setting_name in required_settings:
+ if setting_name not in settings_content:
+ print(f"β Missing Video Indexer setting: {setting_name}")
+ return False
+
+ if 'video_indexer_authentication_type' in settings_content:
+ print("β Legacy video_indexer_authentication_type should not be present")
return False
-
- print("β Video indexer authentication type setting verified in default settings")
+
+ print("β Current Video Indexer settings verified")
return True
-
- except Exception as e:
- print(f"β Test failed with error: {e}")
+
+ except Exception as exc:
+ print(f"β Test failed with error: {exc}")
import traceback
traceback.print_exc()
return False
+
def test_authentication_functions():
- """Test that both authentication functions are available and work correctly."""
- print("π Testing authentication functions...")
-
+ """Test that the runtime uses the ARM managed-identity token flow."""
+ print("π Testing Video Indexer authentication functions...")
+
try:
- # Check if the authentication functions exist
- auth_file_path = os.path.join(
- 'application', 'single_app', 'functions_authentication.py'
- )
-
+ auth_file_path = os.path.join('application', 'single_app', 'functions_authentication.py')
+
if not os.path.exists(auth_file_path):
print(f"β functions_authentication.py not found at {os.path.abspath(auth_file_path)}")
return False
-
- with open(auth_file_path, 'r', encoding='utf-8') as f:
- auth_content = f.read()
-
- # Check for required functions
+
+ with open(auth_file_path, 'r', encoding='utf-8') as file_handle:
+ auth_content = file_handle.read()
+
required_functions = [
'def get_video_indexer_account_token(',
- 'def get_video_indexer_api_key_token(',
- 'def get_video_indexer_managed_identity_token('
+ 'def get_video_indexer_managed_identity_token(',
+ 'DEFAULT_VIDEO_INDEXER_ARM_API_VERSION',
]
-
- for func in required_functions:
- if func not in auth_content:
- print(f"β Missing function: {func}")
+
+ for func_name in required_functions:
+ if func_name not in auth_content:
+ print(f"β Missing function or constant: {func_name}")
return False
-
- # Check for proper conditional logic
- if 'auth_type = settings.get("video_indexer_authentication_type"' not in auth_content:
- print("β Missing authentication type conditional logic")
- return False
-
- # Check for API key authentication pattern - should generate access token
- if 'if auth_type == "key":' not in auth_content:
- print("β Missing API key authentication conditional logic")
+
+ if 'get_video_indexer_api_key_token' in auth_content:
+ print("β Legacy Video Indexer API key helper should not be present")
return False
-
- # Verify API key method generates an access token via API
- required_api_key_patterns = [
- '/auth/',
- '/AccessToken',
- 'Ocp-Apim-Subscription-Key',
- 'requests.get',
- 'allowEdit'
- ]
-
- for pattern in required_api_key_patterns:
- if pattern not in auth_content:
- print(f"β Missing API key token generation pattern: {pattern}")
- return False
-
+
print("β Authentication functions verified")
return True
-
- except Exception as e:
- print(f"β Test failed with error: {e}")
+
+ except Exception as exc:
+ print(f"β Test failed with error: {exc}")
import traceback
traceback.print_exc()
return False
+
def test_video_processing_authentication_support():
- """Test that video processing functions support both authentication methods."""
+ """Test that video processing relies on the ARM token flow."""
print("π Testing video processing authentication support...")
-
+
try:
- docs_file_path = os.path.join(
- 'application', 'single_app', 'functions_documents.py'
- )
-
+ docs_file_path = os.path.join('application', 'single_app', 'functions_documents.py')
+
if not os.path.exists(docs_file_path):
print("β functions_documents.py not found")
return False
-
- with open(docs_file_path, 'r', encoding='utf-8') as f:
- docs_content = f.read()
-
- # Check for authentication type handling in upload
- # Both methods now use accessToken parameter
+
+ with open(docs_file_path, 'r', encoding='utf-8') as file_handle:
+ docs_content = file_handle.read()
+
required_patterns = [
- 'auth_type = settings.get("video_indexer_authentication_type"',
'"accessToken": token',
- 'get_video_indexer_account_token'
+ 'get_video_indexer_account_token',
]
-
+
for pattern in required_patterns:
if pattern not in docs_content:
print(f"β Missing pattern in video processing: {pattern}")
return False
-
+
print("β Video processing authentication support verified")
return True
-
- except Exception as e:
- print(f"β Test failed with error: {e}")
+
+ except Exception as exc:
+ print(f"β Test failed with error: {exc}")
import traceback
traceback.print_exc()
return False
+
def test_admin_ui_authentication_controls():
- """Test that admin UI includes authentication type controls."""
+ """Test that the admin UI reflects the managed-identity-only guidance."""
print("π Testing admin UI authentication controls...")
-
+
try:
- template_file_path = os.path.join(
- 'application', 'single_app', 'templates', 'admin_settings.html'
- )
-
+ template_file_path = os.path.join('application', 'single_app', 'templates', 'admin_settings.html')
+
if not os.path.exists(template_file_path):
print("β admin_settings.html template not found")
return False
-
- with open(template_file_path, 'r', encoding='utf-8') as f:
- template_content = f.read()
-
- # Check for authentication type selector
+
+ with open(template_file_path, 'r', encoding='utf-8') as file_handle:
+ template_content = file_handle.read()
+
required_ui_elements = [
- 'id="video_indexer_authentication_type"',
- 'name="video_indexer_authentication_type"',
- 'value="managed_identity"',
- 'value="key"',
- 'Managed Identity (Azure ARM)',
- 'API Key',
- 'id="video_indexer_api_key_section"',
- 'id="video_indexer_arm_section"',
- 'id="video_indexer_arm_fields"',
- 'toggleVideoIndexerAuthFields'
+ 'id="video_indexer_cloud"',
+ 'id="video_indexer_endpoint_display"',
+ 'id="video_indexer_custom_endpoint_group"',
+ 'App Service system-assigned managed identity',
+ 'Video Indexer API keys are not used by the current setup',
]
-
+
for element in required_ui_elements:
if element not in template_content:
print(f"β Missing UI element: {element}")
return False
-
- # Check for conditional display logic
- if 'style="display: none;"' not in template_content:
- print("β Missing conditional display logic")
- return False
-
+
+ removed_ui_elements = [
+ 'id="video_indexer_api_key"',
+ 'id="video_indexer_authentication_type"',
+ 'toggleVideoIndexerAuthFields',
+ ]
+
+ for removed in removed_ui_elements:
+ if removed in template_content:
+ print(f"β Legacy UI element still present: {removed}")
+ return False
+
print("β Admin UI authentication controls verified")
return True
-
- except Exception as e:
- print(f"β Test failed with error: {e}")
+
+ except Exception as exc:
+ print(f"β Test failed with error: {exc}")
import traceback
traceback.print_exc()
return False
+
def test_backend_form_handling():
- """Test that backend properly handles the authentication type form field."""
+ """Test that backend persists the current ARM-based Video Indexer fields."""
print("π Testing backend form handling...")
-
+
try:
- route_file_path = os.path.join(
- 'application', 'single_app', 'route_frontend_admin_settings.py'
- )
-
+ route_file_path = os.path.join('application', 'single_app', 'route_frontend_admin_settings.py')
+
if not os.path.exists(route_file_path):
print("β route_frontend_admin_settings.py not found")
return False
-
- with open(route_file_path, 'r', encoding='utf-8') as f:
- route_content = f.read()
-
- # Check for authentication type form handling
- if "'video_indexer_authentication_type':" not in route_content:
- print("β Missing authentication type form field handling")
- return False
-
- if "form_data.get('video_indexer_authentication_type'" not in route_content:
- print("β Missing authentication type form data extraction")
+
+ with open(route_file_path, 'r', encoding='utf-8') as file_handle:
+ route_content = file_handle.read()
+
+ required_fields = [
+ "'video_indexer_endpoint':",
+ "'video_indexer_resource_group':",
+ "'video_indexer_subscription_id':",
+ "'video_indexer_account_name':",
+ "'video_indexer_account_id':",
+ "'video_indexer_location':",
+ ]
+
+ for field_name in required_fields:
+ if field_name not in route_content:
+ print(f"β Missing backend form field handling: {field_name}")
+ return False
+
+ if 'video_indexer_authentication_type' in route_content:
+ print("β Legacy authentication type form field should not be present")
return False
-
+
print("β Backend form handling verified")
return True
-
- except Exception as e:
- print(f"β Test failed with error: {e}")
+
+ except Exception as exc:
+ print(f"β Test failed with error: {exc}")
import traceback
traceback.print_exc()
return False
+
if __name__ == "__main__":
success = True
-
- # Run the tests
+
tests = [
- test_video_indexer_authentication_settings,
+ test_video_indexer_settings_shape,
test_authentication_functions,
test_video_processing_authentication_support,
test_admin_ui_authentication_controls,
- test_backend_form_handling
+ test_backend_form_handling,
]
-
+
results = []
for test in tests:
print(f"\nπ§ͺ Running {test.__name__}...")
@@ -251,11 +239,11 @@ def test_backend_form_handling():
results.append(result)
if not result:
success = False
-
+
print(f"\nπ Results: {sum(results)}/{len(results)} tests passed")
-
+
if success:
- print("β All Video Indexer dual authentication support tests passed!")
+ print("β All Video Indexer managed-identity guidance tests passed!")
else:
print("β Some tests failed")
diff --git a/ui_tests/test_admin_multimedia_guidance.py b/ui_tests/test_admin_multimedia_guidance.py
new file mode 100644
index 00000000..73e90bb2
--- /dev/null
+++ b/ui_tests/test_admin_multimedia_guidance.py
@@ -0,0 +1,105 @@
+# test_admin_multimedia_guidance.py
+"""
+UI test for admin multimedia guidance and shared Speech controls.
+
+Version: 0.241.010
+Implemented in: 0.241.010
+
+This test ensures the Search & Extract admin tab exposes the Video Indexer
+cloud selector, the AI Voice setup guide, and the shared Speech managed-identity fields.
+"""
+
+import os
+from pathlib import Path
+
+import pytest
+from playwright.sync_api import expect
+
+
+BASE_URL = os.getenv("SIMPLECHAT_UI_BASE_URL", "").rstrip("/")
+ADMIN_STORAGE_STATE = os.getenv("SIMPLECHAT_UI_ADMIN_STORAGE_STATE", "")
+
+
+def _require_base_url():
+ if not BASE_URL:
+ pytest.skip("Set SIMPLECHAT_UI_BASE_URL to run this UI test.")
+
+
+def _require_storage_state():
+ if not ADMIN_STORAGE_STATE or not Path(ADMIN_STORAGE_STATE).exists():
+ pytest.skip("Set SIMPLECHAT_UI_ADMIN_STORAGE_STATE to a valid authenticated Playwright storage state file.")
+
+
+@pytest.mark.ui
+def test_admin_multimedia_guidance(playwright):
+ """Validate the admin multimedia panel guidance and dynamic shared Speech fields."""
+ _require_base_url()
+ _require_storage_state()
+
+ browser = playwright.chromium.launch()
+ context = browser.new_context(
+ storage_state=ADMIN_STORAGE_STATE,
+ viewport={"width": 1440, "height": 900},
+ )
+
+ try:
+ page = context.new_page()
+ response = page.goto(f"{BASE_URL}/admin/settings#search-extract", wait_until="domcontentloaded")
+ assert response is not None, "Expected a navigation response when loading /admin/settings."
+ if response.status in {401, 403, 404}:
+ pytest.skip("Admin settings page was not available for the configured admin session.")
+
+ assert response.ok, f"Expected /admin/settings to load successfully, got HTTP {response.status}."
+
+ search_extract_nav = page.locator('[data-bs-target="#search-extract"], [data-tab="search-extract"]').first
+ if search_extract_nav.count() > 0:
+ search_extract_nav.click()
+
+ expect(page.locator("#video_indexer_cloud")).to_have_count(1)
+ expect(page.locator("#video_indexer_endpoint_display")).to_have_count(1)
+
+ video_toggle = page.locator("#enable_video_file_support")
+ if not video_toggle.is_checked():
+ video_toggle.check(force=True)
+
+ expect(page.locator("#video_indexer_settings")).to_be_visible()
+
+ page.locator("#video_indexer_cloud").select_option("custom")
+ expect(page.locator("#video_indexer_custom_endpoint_group")).to_be_visible()
+
+ custom_endpoint = "https://video-indexer.contoso.example"
+ page.locator("#video_indexer_custom_endpoint").fill(custom_endpoint)
+ expect(page.locator("#video_indexer_endpoint_display")).to_have_value(custom_endpoint)
+
+ modal_trigger = page.locator('[data-bs-target="#videoIndexerInfoModal"]').first
+ expect(modal_trigger).to_have_count(1)
+ modal_trigger.click()
+ expect(page.locator("#videoIndexerInfoModal")).to_be_visible()
+ expect(page.locator("#videoIndexerInfoModal")).to_contain_text("App Service system-assigned managed identity")
+ page.locator('#videoIndexerInfoModal button[data-bs-dismiss="modal"]').click()
+
+ tts_toggle = page.locator("#enable_text_to_speech")
+ if not tts_toggle.is_checked():
+ tts_toggle.check(force=True)
+
+ expect(page.locator("#audio_service_settings")).to_be_visible()
+ page.locator("#speech_service_authentication_type").select_option("managed_identity")
+ expect(page.locator("#speech_service_resource_id_container")).to_be_visible()
+ expect(page.locator("#speech_service_key_container")).not_to_be_visible()
+
+ page.locator("#speech_service_subscription_id").fill("12345678-1234-1234-1234-123456789abc")
+ page.locator("#speech_service_resource_group").fill("rg-speech-prod")
+ page.locator("#speech_service_resource_name").fill("my-speech-resource")
+ expect(page.locator("#speech_service_resource_id")).to_have_value(
+ "/subscriptions/12345678-1234-1234-1234-123456789abc/resourceGroups/rg-speech-prod/providers/Microsoft.CognitiveServices/accounts/my-speech-resource"
+ )
+
+ page.locator('[data-bs-target="#speechServiceInfoModal"]').click()
+ expect(page.locator("#speechServiceInfoModal")).to_be_visible()
+ expect(page.locator("#speechServiceInfoModal")).to_contain_text("Cognitive Services Speech User")
+ expect(page.locator("#speechServiceInfoModal")).to_contain_text("Generate Custom Domain Name")
+ expect(page.locator("#speechServiceInfoModal")).to_contain_text("Keys and Endpoint")
+ expect(page.locator("#speechServiceInfoModal")).to_contain_text("my-speech-resource")
+ finally:
+ context.close()
+ browser.close()
diff --git a/ui_tests/test_group_workspace_prompt_role_containers_ui.py b/ui_tests/test_group_workspace_prompt_role_containers_ui.py
new file mode 100644
index 00000000..42412f61
--- /dev/null
+++ b/ui_tests/test_group_workspace_prompt_role_containers_ui.py
@@ -0,0 +1,126 @@
+# test_group_workspace_prompt_role_containers_ui.py
+"""
+UI test for group workspace prompt role UI guard.
+Version: 0.241.007
+Implemented in: 0.241.007
+
+This test ensures that missing prompt role containers do not break the group
+workspace documents tab on first load.
+"""
+
+import json
+import os
+from pathlib import Path
+
+import pytest
+from playwright.sync_api import expect
+
+
+BASE_URL = os.getenv("SIMPLECHAT_UI_BASE_URL", "").rstrip("/")
+STORAGE_STATE = os.getenv("SIMPLECHAT_UI_STORAGE_STATE", "")
+
+
+def _require_ui_env():
+ if not BASE_URL:
+ pytest.skip("Set SIMPLECHAT_UI_BASE_URL to run this UI test.")
+ if not STORAGE_STATE or not Path(STORAGE_STATE).exists():
+ pytest.skip(
+ "Set SIMPLECHAT_UI_STORAGE_STATE to a valid authenticated Playwright storage state file."
+ )
+
+
+def _fulfill_json(route, payload, status=200):
+ route.fulfill(
+ status=status,
+ content_type="application/json",
+ body=json.dumps(payload),
+ )
+
+
+@pytest.mark.ui
+def test_group_workspace_load_tolerates_missing_prompt_role_containers(playwright):
+ """Validate documents still render when prompt role containers are absent."""
+ _require_ui_env()
+
+ browser = playwright.chromium.launch()
+ context = browser.new_context(
+ storage_state=STORAGE_STATE,
+ viewport={"width": 1440, "height": 900},
+ )
+ page = context.new_page()
+
+ page_errors = []
+ page.on("pageerror", lambda error: page_errors.append(str(error)))
+ page.add_init_script(
+ """
+ window.addEventListener('DOMContentLoaded', () => {
+ document.getElementById('group-prompts-role-warning')?.remove();
+ document.getElementById('create-group-prompt-section')?.remove();
+ });
+ """
+ )
+
+ page.route(
+ "**/api/groups?page_size=1000",
+ lambda route: _fulfill_json(
+ route,
+ {
+ "groups": [
+ {
+ "id": "group-alpha",
+ "name": "Alpha Team",
+ "isActive": True,
+ "userRole": "Owner",
+ "status": "active",
+ }
+ ]
+ },
+ ),
+ )
+ page.route(
+ "**/api/group_documents?*",
+ lambda route: _fulfill_json(
+ route,
+ {
+ "documents": [],
+ "page": 1,
+ "page_size": 10,
+ "total_count": 0,
+ },
+ ),
+ )
+ page.route(
+ "**/api/group_documents/tags?*",
+ lambda route: _fulfill_json(route, {"tags": []}),
+ )
+
+ try:
+ response = page.goto(f"{BASE_URL}/group_workspaces", wait_until="networkidle")
+
+ assert response is not None, "Expected a navigation response when loading /group_workspaces."
+ assert response.ok, f"Expected /group_workspaces to load successfully, got HTTP {response.status}."
+
+ page.wait_for_function(
+ """
+ () => {
+ const tbody = document.querySelector('#group-documents-table tbody');
+ return tbody && tbody.textContent.includes('No documents found in this group.');
+ }
+ """
+ )
+
+ expect(page.locator("#group-documents-table tbody")).to_contain_text(
+ "No documents found in this group."
+ )
+
+ prompt_role_errors = [
+ error
+ for error in page_errors
+ if "Cannot read properties of null" in error
+ or "create-group-prompt-section" in error
+ or "group-prompts-role-warning" in error
+ ]
+ assert not prompt_role_errors, f"Unexpected prompt role UI page errors: {prompt_role_errors}"
+ finally:
+ context.close()
+ browser.close()
\ No newline at end of file
diff --git a/ui_tests/test_group_workspace_prompt_role_ui_resilience.py b/ui_tests/test_group_workspace_prompt_role_ui_resilience.py
new file mode 100644
index 00000000..a7c0151f
--- /dev/null
+++ b/ui_tests/test_group_workspace_prompt_role_ui_resilience.py
@@ -0,0 +1,175 @@
+# test_group_workspace_prompt_role_ui_resilience.py
+"""
+UI test for group workspace prompt role UI resilience.
+Version: 0.241.003
+Implemented in: 0.241.003
+
+This test ensures the group workspace can refresh active group context without
+raising client-side errors when the prompt role warning and create button
+containers are absent from the DOM.
+"""
+
+import json
+import os
+from pathlib import Path
+
+import pytest
+
+
+BASE_URL = os.getenv("SIMPLECHAT_UI_BASE_URL", "").rstrip("/")
+STORAGE_STATE = os.getenv("SIMPLECHAT_UI_STORAGE_STATE", "")
+
+
def _require_ui_env():
    """Skip the current test unless the UI environment is fully configured.

    Requires SIMPLECHAT_UI_BASE_URL to be set and SIMPLECHAT_UI_STORAGE_STATE
    to point at an existing Playwright storage-state file.
    """
    if not BASE_URL:
        pytest.skip("Set SIMPLECHAT_UI_BASE_URL to run this UI test.")
    have_state = bool(STORAGE_STATE) and Path(STORAGE_STATE).exists()
    if not have_state:
        pytest.skip(
            "Set SIMPLECHAT_UI_STORAGE_STATE to a valid authenticated Playwright storage state file."
        )
+
+
+def _fulfill_json(route, payload, status=200):
+ route.fulfill(
+ status=status,
+ content_type="application/json",
+ body=json.dumps(payload),
+ )
+
+
@pytest.mark.ui
def test_group_workspace_group_change_tolerates_missing_prompt_role_elements(playwright):
    """Validate that a group change does not raise prompt role UI null errors.

    Flow:
      1. Stub the group/document API routes so the page renders deterministically.
      2. Load /group_workspaces and wait for the empty-documents state.
      3. Remove the prompt-role warning and create-prompt containers from the
         DOM, then switch the selected group and click the change-group button.
      4. Assert that no "Cannot read properties of null (reading 'style')"
         page errors were raised while the group context refreshed.
    """
    _require_ui_env()

    browser = playwright.chromium.launch()
    context = browser.new_context(
        storage_state=STORAGE_STATE,  # pre-authenticated session from env var
        viewport={"width": 1440, "height": 900},
    )
    page = context.new_page()

    # Collect every uncaught client-side exception for the final assertion.
    page_errors = []
    page.on("pageerror", lambda error: page_errors.append(str(error)))

    # Two successive /api/groups responses: the first has Alpha active (initial
    # load), the second has Beta active (state after the group switch).
    group_payloads = [
        {
            "groups": [
                {
                    "id": "group-alpha",
                    "name": "Alpha Team",
                    "isActive": True,
                    "userRole": "Owner",
                    "status": "active",
                },
                {
                    "id": "group-beta",
                    "name": "Beta Team",
                    "isActive": False,
                    "userRole": "Admin",
                    "status": "active",
                },
            ]
        },
        {
            "groups": [
                {
                    "id": "group-alpha",
                    "name": "Alpha Team",
                    "isActive": False,
                    "userRole": "Owner",
                    "status": "active",
                },
                {
                    "id": "group-beta",
                    "name": "Beta Team",
                    "isActive": True,
                    "userRole": "Admin",
                    "status": "active",
                },
            ]
        },
    ]

    def handle_groups(route):
        # Serve the payloads in order, popping until one remains; the final
        # (post-switch) payload is then replayed for any further requests.
        payload = group_payloads[0]
        if len(group_payloads) > 1:
            payload = group_payloads.pop(0)
        _fulfill_json(route, payload)

    # Route stubs must be installed before page.goto so the first requests hit
    # them instead of the real backend.
    page.route("**/api/groups?page_size=1000", handle_groups)
    page.route(
        "**/api/groups/setActive",
        lambda route: _fulfill_json(route, {"success": True}),
    )
    page.route(
        "**/api/group_documents?*",
        lambda route: _fulfill_json(
            route,
            {
                "documents": [],
                "page": 1,
                "page_size": 10,
                "total_count": 0,
            },
        ),
    )
    page.route(
        "**/api/group_documents/tags?*",
        lambda route: _fulfill_json(route, {"tags": []}),
    )

    try:
        response = page.goto(f"{BASE_URL}/group_workspaces", wait_until="networkidle")

        assert response is not None, "Expected a navigation response when loading /group_workspaces."
        assert response.ok, f"Expected /group_workspaces to load successfully, got HTTP {response.status}."

        # Wait for the documents table to reach its empty state, which signals
        # the initial group context has finished rendering.
        page.wait_for_function(
            """
            () => {
                const tbody = document.querySelector('#group-documents-table tbody');
                return tbody && tbody.textContent.includes('No documents found in this group.');
            }
            """
        )

        # Simulate the resilience scenario: the prompt-role elements are absent
        # from the DOM, and the group selector already points at Beta.
        # NOTE(review): assumes #group-select / #group-dropdown-button are the
        # controls the change-group handler reads — confirm against the page JS.
        page.evaluate(
            """
            () => {
                document.getElementById('create-group-prompt-section')?.remove();
                document.getElementById('group-prompts-role-warning')?.remove();

                const select = document.getElementById('group-select');
                if (select) {
                    select.value = 'group-beta';
                }

                const selectedText = document.querySelector('#group-dropdown-button .selected-group-text');
                if (selectedText) {
                    selectedText.textContent = 'Beta Team';
                }
            }
            """
        )

        # Trigger the group change and wait until the displayed role matches
        # the Beta payload ("Admin"), proving the context refresh completed.
        page.locator("#btn-change-group").click()
        page.wait_for_function(
            """
            () => {
                const role = document.getElementById('user-role');
                return role && role.textContent.trim() === 'Admin';
            }
            """
        )

        # Only null-.style accesses are the regression under test; other page
        # errors are intentionally ignored here.
        null_style_errors = [
            error
            for error in page_errors
            if "Cannot read properties of null (reading 'style')" in error
        ]

        assert not null_style_errors, f"Unexpected prompt role UI errors: {null_style_errors}"
    finally:
        # Always release browser resources, even on assertion failure.
        context.close()
        browser.close()
\ No newline at end of file