From ef0dda7cd8d1309bc82cb1cd702051991aa8d2c3 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Tue, 23 Dec 2025 16:50:22 +0000
Subject: [PATCH 01/10] feat: Add creator biographical information to EAD XML
 exports

Extract bioghist from ArchivesSpace agent records and inject into EAD:
- Retrieve bioghist notes from linked agent records
- Inject structured XML into EAD section
- Preserve HTML markup for proper rendering in ArcLight
- Fix bioghist element nesting per EAD schema requirements
- Add Copilot agent onboarding documentation

This enables archival collections to display biographical and historical
context about creators directly in the finding aid.
---
 .github/copilot-instructions.md |  98 +++++++++++++++++++++
 arcflow/main.py                 | 150 ++++++++++++++++++++++++++++----
 2 files changed, 230 insertions(+), 18 deletions(-)
 create mode 100644 .github/copilot-instructions.md

diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
new file mode 100644
index 0000000..b3d33a0
--- /dev/null
+++ b/.github/copilot-instructions.md
@@ -0,0 +1,98 @@
+# Copilot Agent Instructions for arcflow
+
+This file provides guidance for GitHub Copilot agents working on the arcflow repository.
+
+## Commit Style
+
+When making changes to this repository, use **granular, single-purpose commits**:
+
+### Guidelines
+
+- **One commit per logical change** - Each commit should do one thing and do it well
+- **Separate refactoring from features** - Don't mix code restructuring with new functionality
+- **Clear, descriptive messages** - Explain what the commit does and why
+- **Include imports with usage** - Add necessary imports in the same commit where they're used, not as separate commits
+
+### Examples
+
+Good commit sequence:
+```
+1. Refactor XML injection logic for extensibility
+2. Add linked_agents to resolve parameter
+3. Add get_creator_bioghist method
+   (includes import of xml.sax.saxutils.escape used in the method)
+4. Integrate bioghist into XML injection
+5. Update comment to reflect new behavior
+```
+
+Bad commit sequences:
+
+Too dense:
+```
+1. Add creator biographical information to EAD XML exports
+   (combines refactoring, new imports, new methods, and integration)
+```
+
+Too granular:
+```
+1. Import xml.sax.saxutils.escape
+2. Add get_creator_bioghist method that uses xml.sax.saxutils.escape
+   (import should have been included in this commit)
+```
+
+### Commit Message Format
+
+- **First line**: Clear, concise summary (50-72 characters)
+- **Body** (optional): Bullet points explaining the changes
+- **Keep it focused**: If you need many bullets, consider splitting into multiple commits
+
+### Why This Matters
+
+- Makes code review easier
+- Helps understand the progression of changes
+- Easier to revert specific changes if needed
+- Clear history for future maintainers
+
+---
+
+## XML Content Handling in EAD Pipeline
+
+When injecting content into EAD XML files, distinguish between plain text and structured XML:
+
+### Escaping Strategy
+
+- **Plain text labels** (recordgroup, subgroup): Use `xml_escape()` to escape special characters (`&`, `<`, `>`)
+  - These are simple strings that may contain characters that break XML syntax
+  - Example: `xml_escape(rg_label)` → converts `"Group & Co"` to `"Group &amp; Co"`
+
+- **Structured EAD XML content** (bioghist, scopecontent): Do NOT escape
+  - Content from ArchivesSpace already contains valid EAD XML markup (`<emph>`, `<p>`, etc.)
+  - These are legitimate XML nodes that must be preserved
+  - Escaping would convert them to literal text: `<emph>` → `&lt;emph&gt;`
+  - Example: Pass through as-is: `f'<p>{subnote["content"]}</p>'`
+
+### Why This Matters
+
+The Traject indexing pipeline and ArcLight display rely on proper XML structure:
+1. Traject's `.to_html` converts XML nodes to HTML
+2. ArcLight's `render_html_tags` processes the HTML for display
+3. If XML nodes are escaped (treated as text), they can't be processed and appear as raw markup
+
+### Pattern for Future Fields
+
+When adding new EAD fields to the pipeline:
+1. Determine if content is plain text or structured XML
+2. Apply escaping only to plain text
+3. Pass structured XML through unchanged
+4. Document the decision in code comments
+
+---
+
+## Adding More Instructions
+
+To add additional instructions to this file:
+
+1. Add a new section with a clear heading (e.g., `## Testing Strategy`, `## Code Style`)
+2. Keep instructions concise and actionable
+3. Use examples where helpful
+4. Maintain the simple, scannable format
diff --git a/arcflow/main.py b/arcflow/main.py
index f8f1cc8..a8621fa 100644
--- a/arcflow/main.py
+++ b/arcflow/main.py
@@ -10,6 +10,7 @@ import logging
 import math
 from xml.dom.pulldom import parse, START_ELEMENT
+from xml.sax.saxutils import escape as xml_escape
 from datetime import datetime, timezone
 from asnake.client import ASnakeClient
 from multiprocessing.pool import ThreadPool as Pool
@@ -206,7 +207,7 @@ def task_resource(self, repo, resource_id, xml_dir, pdf_dir, indent_size=0):
         resource = self.client.get(
             f'{repo["uri"]}/resources/{resource_id}',
             params={
-                'resolve': ['classifications', 'classification_terms'],
+                'resolve': ['classifications', 'classification_terms', 'linked_agents'],
             }).json()

         xml_file_path = f'{xml_dir}/{resource["ead_id"]}.xml'
         # replace dots with dashes in EAD ID to avoid issues with Solr
@@ -226,24 +227,58 @@
                 'ead3': 'false',
             })

-        # add record group and subgroup labels to EAD inside <archdesc level="collection">
+        # add custom XML elements to EAD inside <archdesc level="collection">
+        # (record group/subgroup labels and biographical/historical notes)
         if xml.content:
-            rg_label, sg_label = extract_labels(resource)[1:3]
-            if rg_label:
-                xml_content = xml.content.decode('utf-8')
-                insert_pos = xml_content.find('<archdesc level="collection">')
-                if insert_pos != -1:
-                    # Find the position after the opening tag
-                    insert_pos = xml_content.find('</did>', insert_pos)
-                    extra_xml = f'<recordgroup>{rg_label}</recordgroup>'
-                    if sg_label:
-                        extra_xml += f'<subgroup>{sg_label}</subgroup>'
-                    xml_content = (xml_content[:insert_pos] +
-                                   extra_xml +
-                                   xml_content[insert_pos:])
-                    xml_content = xml_content.encode('utf-8')
-            else:
-                xml_content = xml.content
+            xml_content = xml.content.decode('utf-8')
+            insert_pos = xml_content.find('<archdesc level="collection">')
+
+            if insert_pos != -1:
+                # Find the position after the closing </did> tag
+                did_end_pos = xml_content.find('</did>', insert_pos)
+
+                if did_end_pos != -1:
+                    # Move to after the </did> tag
+                    did_end_pos += len('</did>')
+                    extra_xml = ''
+
+                    # Add record group and subgroup labels
+                    rg_label, sg_label = extract_labels(resource)[1:3]
+                    if rg_label:
+                        extra_xml += f'\n<recordgroup>{xml_escape(rg_label)}</recordgroup>'
+                    if sg_label:
+                        extra_xml += f'\n<subgroup>{xml_escape(sg_label)}</subgroup>'
+
+                    # Handle biographical/historical notes from creator agents
+                    bioghist_content = self.get_creator_bioghist(resource, indent_size=indent_size)
+                    if
bioghist_content: + # Check if there's already a bioghist element in the EAD + # Search for existing bioghist after </did> but before </archdesc> + archdesc_end = xml_content.find('</archdesc>', did_end_pos) + search_section = xml_content[did_end_pos:archdesc_end] if archdesc_end != -1 else xml_content[did_end_pos:] + + # Look for closing </bioghist> tag + existing_bioghist_end = search_section.rfind('</bioghist>') + + if existing_bioghist_end != -1: + # Found existing bioghist - insert agent elements INSIDE it (before closing tag) + insert_pos = did_end_pos + existing_bioghist_end + xml_content = (xml_content[:insert_pos] + + f'\n{bioghist_content}\n' + + xml_content[insert_pos:]) + else: + # No existing bioghist - wrap agent elements in parent container + wrapped_content = f'<bioghist>\n{bioghist_content}\n</bioghist>' + extra_xml += f'\n{wrapped_content}' + + if extra_xml: + xml_content = (xml_content[:did_end_pos] + + extra_xml + + xml_content[did_end_pos:]) + + xml_content = xml_content.encode('utf-8') + else: + xml_content = xml.content # next level of indentation for nested operations indent_size += 2 @@ -511,6 +546,85 @@ def index(self, repo_id, xml_file_path, indent_size=0): self.log.error(f'{indent}Error indexing pending resources in repository ID {repo_id} to ArcLight Solr: {e}') + def get_creator_bioghist(self, resource, indent_size=0): + """ + Get biographical/historical notes from creator agents linked to the resource. + Returns nested bioghist elements for each creator, or None if no creator agents have notes. + Each bioghist element includes the creator name in a head element and an id attribute. + """ + indent = ' ' * indent_size + bioghist_elements = [] + + if 'linked_agents' not in resource: + return None + + # Process linked_agents in order to maintain consistency with origination order + for linked_agent in resource['linked_agents']: + # Only process agents with 'creator' role + if linked_agent.get('role') == 'creator': + agent_ref = linked_agent.get('ref') + if agent_ref: + try: + agent = self.client.get(agent_ref).json() + + # Get agent name for head element + agent_name = agent.get('title') or agent.get('display_name', {}).get('sort_name', 'Unknown') + + # Check for notes in the agent record + if 'notes' in agent: + for note in agent['notes']: + # Look for biographical/historical notes + if note.get('jsonmodel_type') == 'note_bioghist': + # Get persistent_id for the id attribute + persistent_id = note.get('persistent_id', '') + if not persistent_id: + self.log.error(f'{indent}**ASSUMPTION VIOLATION**: Expected persistent_id in note_bioghist for agent {agent_ref}') + # Skip creating id attribute if persistent_id is missing + persistent_id = None + + # Extract note content from subnotes + paragraphs = [] + if 'subnotes' in note: + for subnote in note['subnotes']: + if 'content' in subnote: + # Split content on single newlines to create paragraphs + content = subnote['content'] + # Handle content as either string or list with explicit type checking + if isinstance(content, str): + # Split on newline and filter out empty strings + lines = [line.strip() for line in content.split('\n') if line.strip()] + elif isinstance(content, list): + # Content is already a list - use as is + lines = [str(item).strip() for item in content if str(item).strip()] + else: + # Log unexpected content type prominently + self.log.error(f'{indent}**ASSUMPTION VIOLATION**: Expected string or list for subnote content in agent {agent_ref}, got {type(content).__name__}') + continue + # Wrap each 
line in <p> tags + for line in lines: + paragraphs.append(f'<p>{line}</p>') + + # Create nested bioghist element if we have paragraphs + if paragraphs: + paragraphs_xml = '\n'.join(paragraphs) + heading = f'Historical Note from {xml_escape(agent_name)} Creator Record' + # Only include id attribute if persistent_id is available + if persistent_id: + bioghist_el = f'<bioghist id="aspace_{persistent_id}"><head>{heading}</head>\n{paragraphs_xml}\n</bioghist>' + else: + bioghist_el = f'<bioghist><head>{heading}</head>\n{paragraphs_xml}\n</bioghist>' + bioghist_elements.append(bioghist_el) + except Exception as e: + self.log.error(f'{indent}Error fetching biographical information for agent {agent_ref}: {e}') + + if bioghist_elements: + # Return the agent bioghist elements (unwrapped) + # The caller will decide whether to wrap them based on whether + # an existing bioghist element exists + return '\n'.join(bioghist_elements) + return None + + def get_repo_id(self, repo): """ Get the repository ID from the repository URI. From b3f77ebcf1181c60411a89cf68c4dbf2fdf218e8 Mon Sep 17 00:00:00 2001 From: Alex Dryden <adryden3@illinois.edu> Date: Wed, 11 Feb 2026 14:13:05 -0500 Subject: [PATCH 02/10] feat(arclight#29): Add creator/agent indexing system for ArcLight MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement complete ETL pipeline for ArchivesSpace agents: - Extract all agent records via ArchivesSpace API - Generate EAC-CPF XML documents for each agent - Auto-discover and configure traject indexing - Batch index to Solr (100 files per call for performance) - Support multiple processing modes (agents-only, collections-only, both) - Add 11 new Solr fields for agent metadata - Include 271-line traject config for EAC-CPF → Solr mapping Key features: - Parallel to existing collection record indexing - Dynamic Solr field mapping for ArcLight compatibility - Robust error handling and logging - Configurable traject config discovery paths This allows ArcLight to provide dedicated agent/creator pages with full biographical information, related collections, and authority control. --- README.md | 189 ++++++++++++++++- arcflow/main.py | 430 ++++++++++++++++++++++++++++++++++++-- traject_config_eac_cpf.rb | 275 ++++++++++++++++++++++++ 3 files changed, 880 insertions(+), 14 deletions(-) create mode 100644 traject_config_eac_cpf.rb diff --git a/README.md b/README.md index f6397ac..6c570bf 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,190 @@ # ArcFlow -Code for exporting data from ArchivesSpace to ArcLight, along with additional utility scripts for data handling and transformation. \ No newline at end of file +Code for exporting data from ArchivesSpace to ArcLight, along with additional utility scripts for data handling and transformation. + +## Quick Start + +This directory contains a complete, working installation of arcflow with creator records support. To run it: + +```bash +# 1. Install dependencies +pip install -r requirements.txt + +# 2. Configure credentials +cp .archivessnake.yml.example .archivessnake.yml +nano .archivessnake.yml # Add your ArchivesSpace credentials + +# 3. Set environment variables +export ARCLIGHT_DIR=/path/to/your/arclight-app +export ASPACE_DIR=/path/to/your/archivesspace +export SOLR_URL=http://localhost:8983/solr/blacklight-core + +# 4. 
Run arcflow +python -m arcflow.main + +``` + +--- + +## Features + +- **Collection Indexing**: Exports EAD XML from ArchivesSpace and indexes to ArcLight Solr +- **Creator Records**: Extracts creator agent information and indexes as standalone documents +- **Biographical Notes**: Injects creator biographical/historical notes into collection EAD XML +- **PDF Generation**: Generates finding aid PDFs via ArchivesSpace jobs +- **Incremental Updates**: Supports modified-since filtering for efficient updates + +## Creator Records + +ArcFlow now generates standalone creator documents in addition to collection records. Creator documents: + +- Include biographical/historical notes from ArchivesSpace agent records +- Link to all collections where the creator is listed +- Can be searched and displayed independently in ArcLight +- Are marked with `is_creator: true` to distinguish from collections +- Must be fed into a Solr instance with fields to match their specific facets (See:Configure Solr Schema below ) + +### How Creator Records Work + +1. **Extraction**: `get_all_agents()` fetches all agents from ArchivesSpace +2. **Processing**: `task_agent()` generates an EAC-CPF XML document for each agent with bioghist notes +3. **Linking**: Handled via Solr using the persistent_id field (agents and collections linked through bioghist references) +4. **Indexing**: Creator XML files are indexed to Solr using `traject_config_eac_cpf.rb` + +### Creator Document Format + +Creator documents are stored as XML files in `agents/` directory using the ArchivesSpace EAC-CPF export: + +```xml +<?xml version="1.0" encoding="UTF-8"?> +<eac-cpf xml:lang="eng" xmlns="urn:isbn:1-931666-33-4" xmlns:html="http://www.w3.org/1999/xhtml" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="urn:isbn:1-931666-33-4 https://eac.staatsbibliothek-berlin.de/schema/cpf.xsd"> + <control/> + <cpfDescription> + <identity> + <entityType>corporateBody</entityType> + <nameEntry> + <part localType="primary_name">Core: Leadership, Infrastructure, Futures</part> + <authorizedForm>local</authorizedForm> + </nameEntry> + </identity> + <description> + <existDates> + <date localType="existence" standardDate="2020">2020-</date> + </existDates> + <biogHist> + <p>Founded on September 1, 2020, the Core: Leadership, Infrastructure, Futures division of the American Library Association has a mission to cultivate and amplify the collective expertise of library workers in core functions through community building, advocacy, and learning. + In June 2020, the ALA Council voted to approve Core: Leadership, Infrastructure, Futures as a new ALA division beginning September 1, 2020, and to dissolve the Association for Library Collections and Technical Services (ALCTS), the Library Information Technology Association (LITA) and the Library Leadership and Management Association (LLAMA) effective August 31, 2020. The vote to form Core was 163 to 1.(1)</p> + <citation>1. "ALA Council approves Core; dissolves ALCTS, LITA and LLAMA," July 1, 2020, http://www.ala.org/news/member-news/2020/07/ala-council-approves-core-dissolves-alcts-lita-and-llama.</citation> + </biogHist> + </description> + <relations/> + </cpfDescription> +</eac-cpf> +``` + +### Indexing Creator Documents + +#### Configure Solr Schema (Required Before Indexing) + +⚠️ **CRITICAL PREREQUISITE** - Before you can index creator records to Solr, you must configure the Solr schema. 
+
+**See [SOLR_SCHEMA.md](SOLR_SCHEMA.md) for complete instructions on:**
+- Which fields to add (is_creator, creator_persistent_id, etc.)
+- Three methods to add them (Schema API recommended, managed-schema, or schema.xml)
+- How to verify they're added
+- Troubleshooting "unknown field" errors
+
+**Quick Schema Setup (Schema API method):**
+```bash
+# Add is_creator field
+curl -X POST -H 'Content-type:application/json' \
+  http://localhost:8983/solr/blacklight-core/schema \
+  -d '{"add-field": {"name": "is_creator", "type": "boolean", "indexed": true, "stored": true}}'
+
+# Add other required fields (see SOLR_SCHEMA.md for complete list)
+```
+
+**Verify schema is configured:**
+```bash
+curl "http://localhost:8983/solr/blacklight-core/schema/fields/is_creator"
+# Should return field definition, not 404
+```
+
+⚠️ **If you skip this step, you'll get:**
+```
+ERROR: [doc=creator_corporate_entities_584] unknown field 'is_creator'
+```
+
+This is a **one-time setup** per Solr instance.
+
+---
+
+To index creator documents to Solr:
+
+```bash
+bundle exec traject \
+  -u http://localhost:8983/solr/blacklight-core \
+  -i xml \
+  -c traject_config_eac_cpf.rb \
+  /path/to/agents/*.xml
+```
+
+Or integrate into your ArcFlow deployment workflow.
+
+## Installation
+
+See the original installation instructions in your deployment documentation.
+
+## Configuration
+
+- `.archivessnake.yml` - ArchivesSpace API credentials
+- `.arcflow.yml` - Last update timestamp tracking
+
+## Usage
+
+```bash
+python -m arcflow.main --arclight-dir /path --aspace-dir /path --solr-url http://... [options]
+```
+
+### Command Line Options
+
+Required arguments:
+- `--arclight-dir` - Path to ArcLight installation directory
+- `--aspace-dir` - Path to ArchivesSpace installation directory
+- `--solr-url` - URL of the Solr core (e.g., http://localhost:8983/solr/blacklight-core)
+
+Optional arguments:
+- `--force-update` - Force update of all data (recreates everything from scratch)
+- `--traject-extra-config` - Path to extra Traject configuration file
+- `--agents-only` - Process only agent records, skip collections (useful for testing agents)
+- `--collections-only` - Skip creator processing; export EAD XML, generate PDF finding aids, and index collections
+- `--skip-creator-indexing` - Collect EAC-CPF files only; do not index into Solr
+### Examples
+
+**Normal run (process all collections and agents):**
+```bash
+python -m arcflow.main \
+  --arclight-dir /path/to/arclight \
+  --aspace-dir /path/to/archivesspace \
+  --solr-url http://localhost:8983/solr/blacklight-core
+```
+
+**Process only agents (skip collections):**
+```bash
+python -m arcflow.main \
+  --arclight-dir /path/to/arclight \
+  --aspace-dir /path/to/archivesspace \
+  --solr-url http://localhost:8983/solr/blacklight-core \
+  --agents-only
+```
+
+**Force full update:**
+```bash
+python -m arcflow.main \
+  --arclight-dir /path/to/arclight \
+  --aspace-dir /path/to/archivesspace \
+  --solr-url http://localhost:8983/solr/blacklight-core \
+  --force-update
+```
+
+See `--help` for all available options.
\ No newline at end of file diff --git a/arcflow/main.py b/arcflow/main.py index a8621fa..292d049 100644 --- a/arcflow/main.py +++ b/arcflow/main.py @@ -9,12 +9,14 @@ import re import logging import math +import sys from xml.dom.pulldom import parse, START_ELEMENT from xml.sax.saxutils import escape as xml_escape +from xml.etree import ElementTree as ET from datetime import datetime, timezone from asnake.client import ASnakeClient from multiprocessing.pool import ThreadPool as Pool -from utils.stage_classifications import extract_labels +from .utils.stage_classifications import extract_labels base_dir = os.path.abspath((__file__) + "/../../") @@ -38,14 +40,19 @@ class ArcFlow: """ - def __init__(self, arclight_dir, aspace_dir, solr_url, traject_extra_config='', force_update=False): + def __init__(self, arclight_dir, aspace_dir, solr_url, traject_extra_config='', force_update=False, agents_only=False, collections_only=False, arcuit_dir=None, skip_creator_indexing=False): self.solr_url = solr_url self.batch_size = 1000 - self.traject_extra_config = f'-c {traject_extra_config}' if traject_extra_config.strip() else '' + clean_extra_config = traject_extra_config.strip() + self.traject_extra_config = clean_extra_config or None self.arclight_dir = arclight_dir self.aspace_jobs_dir = f'{aspace_dir}/data/shared/job_files' self.job_type = 'print_to_pdf_job' self.force_update = force_update + self.agents_only = agents_only + self.collections_only = collections_only + self.arcuit_dir = arcuit_dir + self.skip_creator_indexing = skip_creator_indexing self.log = logging.getLogger('arcflow') self.pid = os.getpid() self.pid_file_path = os.path.join(base_dir, 'arcflow.pid') @@ -395,6 +402,7 @@ def update_eads(self): pdf_dir = f'{self.arclight_dir}/public/pdf' modified_since = int(self.last_updated.timestamp()) + if self.force_update or modified_since <= 0: modified_since = 0 # delete all EADs and related files in ArcLight Solr @@ -454,7 +462,7 @@ def update_eads(self): # Tasks for indexing pending resources results_3 = [pool.apply_async( - self.index, + self.index_collections, args=(repo_id, f'{xml_dir}/{repo_id}_*_batch_{batch_num}.xml', indent_size)) for repo_id, batch_num in batches] @@ -527,17 +535,58 @@ def update_eads(self): page += 1 - def index(self, repo_id, xml_file_path, indent_size=0): + def index_collections(self, repo_id, xml_file_path, indent_size=0): + """Index collection XML files to Solr using traject.""" indent = ' ' * indent_size self.log.info(f'{indent}Indexing pending resources in repository ID {repo_id} to ArcLight Solr...') try: + # Get arclight traject config path + result_show = subprocess.run( + ['bundle', 'show', 'arclight'], + capture_output=True, + text=True, + cwd=self.arclight_dir + ) + arclight_path = result_show.stdout.strip() if result_show.returncode == 0 else '' + + if not arclight_path: + self.log.error(f'{indent}Could not find arclight gem path') + return + + traject_config = f'{arclight_path}/lib/arclight/traject/ead2_config.rb' + + cmd = [ + 'bundle', 'exec', 'traject', + '-u', self.solr_url, + '-s', 'processing_thread_pool=8', + '-s', 'solr_writer.thread_pool=8', + '-s', f'solr_writer.batch_size={self.batch_size}', + '-s', 'solr_writer.commit_on_close=true', + '-i', 'xml', + '-c', traject_config + ] + + if self.traject_extra_config: + if isinstance(self.traject_extra_config, (list, tuple)): + cmd.extend(self.traject_extra_config) + else: + # Treat a string extra config as a path and pass it with -c + cmd.extend(['-c', self.traject_extra_config]) + + 
cmd.append(xml_file_path) + + env = os.environ.copy() + env['REPOSITORY_ID'] = str(repo_id) + result = subprocess.run( - f'REPOSITORY_ID={repo_id} bundle exec traject -u {self.solr_url} -s processing_thread_pool=8 -s solr_writer.thread_pool=8 -s solr_writer.batch_size={self.batch_size} -s solr_writer.commit_on_close=true -i xml -c $(bundle show arclight)/lib/arclight/traject/ead2_config.rb {self.traject_extra_config} {xml_file_path}', -# f'FILE={xml_file_path} SOLR_URL={self.solr_url} REPOSITORY_ID={repo_id} TRAJECT_SETTINGS="processing_thread_pool=8 solr_writer.thread_pool=8 solr_writer.batch_size=1000 solr_writer.commit_on_close=false" bundle exec rake arcuit:index', - shell=True, + cmd, cwd=self.arclight_dir, - stderr=subprocess.PIPE,) - self.log.error(f'{indent}{result.stderr.decode("utf-8")}') + env=env, + stderr=subprocess.PIPE, + ) + + if result.stderr: + self.log.error(f'{indent}{result.stderr.decode("utf-8")}') if result.returncode != 0: self.log.error(f'{indent}Failed to index pending resources in repository ID {repo_id} to ArcLight Solr. Return code: {result.returncode}') else: @@ -625,6 +674,324 @@ def get_creator_bioghist(self, resource, indent_size=0): return None + def get_all_agents(self, agent_types=None, modified_since=0, indent_size=0): + """ + Fetch ALL agents from ArchivesSpace (not just creators). + Uses direct agent API endpoints for comprehensive coverage. + + Args: + agent_types: List of agent types to fetch. Default: ['corporate_entities', 'people', 'families'] + modified_since: Unix timestamp to filter agents modified since this time (if API supports it) + indent_size: Indentation size for logging + + Returns: + set: Set of agent URIs (e.g., '/agents/corporate_entities/123') + """ + if agent_types is None: + agent_types = ['corporate_entities', 'people', 'families'] + + indent = ' ' * indent_size + all_agents = set() + + self.log.info(f'{indent}Fetching ALL agents from ArchivesSpace...') + + for agent_type in agent_types: + try: + # Try with modified_since parameter first + params = {'all_ids': True} + if modified_since > 0: + params['modified_since'] = modified_since + + response = self.client.get(f'/agents/{agent_type}', params=params) + agent_ids = response.json() + + self.log.info(f'{indent}Found {len(agent_ids)} {agent_type} agents') + + # Add agent URIs to set + for agent_id in agent_ids: + agent_uri = f'/agents/{agent_type}/{agent_id}' + all_agents.add(agent_uri) + + except Exception as e: + self.log.error(f'{indent}Error fetching {agent_type} agents: {e}') + # If modified_since fails, try without it + if modified_since > 0: + self.log.warning(f'{indent}Retrying {agent_type} without modified_since filter...') + try: + response = self.client.get(f'/agents/{agent_type}', params={'all_ids': True}) + agent_ids = response.json() + self.log.info(f'{indent}Found {len(agent_ids)} {agent_type} agents (no date filter)') + for agent_id in agent_ids: + agent_uri = f'/agents/{agent_type}/{agent_id}' + all_agents.add(agent_uri) + except Exception as e2: + self.log.error(f'{indent}Failed to fetch {agent_type} agents: {e2}') + + self.log.info(f'{indent}Found {len(all_agents)} total agents across all types.') + return all_agents + + + def task_agent(self, agent_uri, agents_dir, repo_id=1, indent_size=0): + """ + Process a single agent and generate a creator document in EAC-CPF XML format. + Retrieves EAC-CPF directly from ArchivesSpace archival_contexts endpoint. 
+ + Args: + agent_uri: Agent URI from ArchivesSpace (e.g., '/agents/corporate_entities/123') + agents_dir: Directory to save agent XML files + repo_id: Repository ID to use for archival_contexts endpoint (default: 1) + indent_size: Indentation size for logging + + Returns: + str: Creator document ID if successful, None otherwise + """ + indent = ' ' * indent_size + + try: + # Parse agent URI to extract type and ID + # URI format: /agents/{agent_type}/{id} + parts = agent_uri.strip('/').split('/') + if len(parts) != 3 or parts[0] != 'agents': + self.log.error(f'{indent}Invalid agent URI format: {agent_uri}') + return None + + agent_type = parts[1] # e.g., 'corporate_entities', 'people', 'families' + agent_id = parts[2] + + # Construct EAC-CPF endpoint + # Format: /repositories/{repo_id}/archival_contexts/{agent_type}/{id}.xml + eac_cpf_endpoint = f'/repositories/{repo_id}/archival_contexts/{agent_type}/{agent_id}.xml' + + self.log.debug(f'{indent}Fetching EAC-CPF from: {eac_cpf_endpoint}') + + # Fetch EAC-CPF XML + response = self.client.get(eac_cpf_endpoint) + + if response.status_code != 200: + self.log.error(f'{indent}Failed to fetch EAC-CPF for {agent_uri}: HTTP {response.status_code}') + return None + + eac_cpf_xml = response.text + + # Parse the EAC-CPF XML to validate and inspect its structure + try: + root = ET.fromstring(eac_cpf_xml) + self.log.debug(f'{indent}Parsed EAC-CPF XML root element: {root.tag}') + except ET.ParseError as e: + self.log.error(f'{indent}Failed to parse EAC-CPF XML for {agent_uri}: {e}') + return None + + # Generate creator ID + creator_id = f'creator_{agent_type}_{agent_id}' + + # Save EAC-CPF XML to file + filename = f'{agents_dir}/{creator_id}.xml' + with open(filename, 'w', encoding='utf-8') as f: + f.write(eac_cpf_xml) + + self.log.info(f'{indent}Created creator document: {creator_id}') + return creator_id + + except Exception as e: + self.log.error(f'{indent}Error processing agent {agent_uri}: {e}') + import traceback + self.log.error(f'{indent}{traceback.format_exc()}') + return None + + def process_creators(self): + """ + Process creator agents and generate standalone creator documents. + + Returns: + list: List of created creator document IDs + """ + + xml_dir = f'{self.arclight_dir}/public/xml' + agents_dir = f'{xml_dir}/agents' + modified_since = int(self.last_updated.timestamp()) + indent_size = 0 + indent = ' ' * indent_size + + self.log.info(f'{indent}Processing creator agents...') + + # Create agents directory if it doesn't exist + os.makedirs(agents_dir, exist_ok=True) + + # Get agents to process + agents = self.get_all_agents(modified_since=modified_since, indent_size=indent_size) + + # Process agents in parallel + with Pool(processes=10) as pool: + results_agents = [pool.apply_async( + self.task_agent, + args=(agent_uri_item, agents_dir, 1, indent_size)) # Use repo_id=1 + for agent_uri_item in agents] + + creator_ids = [r.get() for r in results_agents] + creator_ids = [cid for cid in creator_ids if cid is not None] + + self.log.info(f'{indent}Created {len(creator_ids)} creator documents.') + + # NOTE: Collection links are NOT added to creator XML files. + # Instead, linking is handled via Solr using the persistent_id field: + # - Creator bioghist has persistent_id as the 'id' attribute + # - Collection EADs reference creators via bioghist with persistent_id + # - Solr indexes both, allowing queries to link them + # This avoids the expensive operation of scanning all resources to build a linkage map. 
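+        #
+        # Illustrative sketch only (not part of this patch; the field names
+        # are assumptions based on the README and traject config, and
+        # persistent_id stands in for a real value): given a creator's
+        # persistent_id, the linked collections could be found with a plain
+        # Solr query such as:
+        #
+        #   requests.get(f'{self.solr_url}/select', params={
+        #       'q': f'bioghist_html_tesm:"aspace_{persistent_id}"',
+        #       'fl': 'id,title_ssm',
+        #       'wt': 'json',
+        #   })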
+ + # Index creators to Solr (if not skipped) + if not self.skip_creator_indexing and creator_ids: + self.log.info(f'{indent}Indexing {len(creator_ids)} creator records to Solr...') + traject_config = self.find_traject_config() + if traject_config: + indexed = self.index_creators(agents_dir, creator_ids) + self.log.info(f'{indent}Creator indexing complete: {indexed}/{len(creator_ids)} indexed') + else: + self.log.info(f'{indent}Skipping creator indexing (traject config not found)') + self.log.info(f'{indent}To index manually:') + self.log.info(f'{indent} cd {self.arclight_dir}') + self.log.info(f'{indent} bundle exec traject -u {self.solr_url} -i xml \\') + self.log.info(f'{indent} -c /path/to/arcuit/arcflow/traject_config_eac_cpf.rb \\') + self.log.info(f'{indent} {agents_dir}/*.xml') + elif self.skip_creator_indexing: + self.log.info(f'{indent}Skipping creator indexing (--skip-creator-indexing flag set)') + + return creator_ids + + + def find_traject_config(self): + """ + Find the traject config for creator indexing. + + Tries: + 1. bundle show arcuit (finds installed gem) + 2. self.arcuit_dir (explicit path) + 3. Returns None if neither works + + Returns: + str: Path to traject config, or None if not found + """ + # Try bundle show arcuit first + try: + result = subprocess.run( + ['bundle', 'show', 'arcuit'], + cwd=self.arclight_dir, + capture_output=True, + text=True, + timeout=10 + ) + if result.returncode == 0: + arcuit_path = result.stdout.strip() + # Prefer config at gem root, fall back to legacy subdirectory layout + candidate_paths = [ + os.path.join(arcuit_path, 'traject_config_eac_cpf.rb'), + os.path.join(arcuit_path, 'arcflow', 'traject_config_eac_cpf.rb'), + ] + for traject_config in candidate_paths: + if os.path.exists(traject_config): + self.log.info(f'Found traject config via bundle show: {traject_config}') + return traject_config + self.log.warning( + 'bundle show arcuit succeeded but traject_config_eac_cpf.rb ' + 'was not found in any expected location under the gem root' + ) + else: + self.log.debug('bundle show arcuit failed (gem not installed?)') + except Exception as e: + self.log.debug(f'Error running bundle show arcuit: {e}') + # Fall back to arcuit_dir if provided + if self.arcuit_dir: + candidate_paths = [ + os.path.join(self.arcuit_dir, 'traject_config_eac_cpf.rb'), + os.path.join(self.arcuit_dir, 'arcflow', 'traject_config_eac_cpf.rb'), + ] + for traject_config in candidate_paths: + if os.path.exists(traject_config): + self.log.info(f'Using traject config from arcuit_dir: {traject_config}') + return traject_config + self.log.warning( + 'arcuit_dir provided but traject_config_eac_cpf.rb was not found ' + 'in any expected location' + ) + # No config found + self.log.warning('Could not find traject config (bundle show arcuit failed and arcuit_dir not provided or invalid)') + return None + + + def index_creators(self, agents_dir, creator_ids, batch_size=100): + """ + Index creator XML files to Solr using traject. 
+ + Args: + agents_dir: Directory containing creator XML files + creator_ids: List of creator IDs to index + batch_size: Number of files to index per traject call (default: 100) + + Returns: + int: Number of successfully indexed creators + """ + traject_config = self.find_traject_config() + if not traject_config: + return 0 + + indexed_count = 0 + failed_count = 0 + + # Process in batches to avoid command line length limits + total_batches = math.ceil(len(creator_ids) / batch_size) + for i in range(0, len(creator_ids), batch_size): + batch = creator_ids[i:i+batch_size] + batch_num = (i // batch_size) + 1 + + # Build list of XML files for this batch + xml_files = [f'{agents_dir}/{cid}.xml' for cid in batch] + + # Filter to only existing files + existing_files = [f for f in xml_files if os.path.exists(f)] + + if not existing_files: + self.log.warning(f' Batch {batch_num}/{total_batches}: No files found, skipping') + continue + + try: + cmd = [ + 'bundle', 'exec', 'traject', + '-u', self.solr_url, + '-i', 'xml', + '-c', traject_config + ] + existing_files + + self.log.info(f' Indexing batch {batch_num}/{total_batches}: {len(existing_files)} files') + + result = subprocess.run( + cmd, + cwd=self.arclight_dir, + stderr=subprocess.PIPE, + timeout=300 # 5 minute timeout per batch + ) + + if result.returncode == 0: + indexed_count += len(existing_files) + self.log.info(f' Successfully indexed {len(existing_files)} creators') + else: + failed_count += len(existing_files) + self.log.error(f' Traject failed with exit code {result.returncode}') + if result.stderr: + self.log.error(f' STDERR: {result.stderr.decode("utf-8")}') + + except subprocess.TimeoutExpired: + self.log.error(f' Traject timed out for batch {batch_num}/{total_batches}') + failed_count += len(existing_files) + except Exception as e: + self.log.error(f' Error indexing batch {batch_num}/{total_batches}: {e}') + failed_count += len(existing_files) + + if failed_count > 0: + self.log.warning(f'Creator indexing completed with errors: {indexed_count} succeeded, {failed_count} failed') + + return indexed_count + + def get_repo_id(self, repo): """ Get the repository ID from the repository URI. @@ -753,11 +1120,24 @@ def run(self): Run the ArcFlow process. """ self.log.info(f'ArcFlow process started (PID: {self.pid}).') - self.update_repositories() - self.update_eads() + + # Update repositories (unless agents-only mode) + if not self.agents_only: + self.update_repositories() + + # Update collections/EADs (unless agents-only mode) + if not self.agents_only: + self.update_eads() + + # Update creator records (unless collections-only mode) + if not self.collections_only: + self.process_creators() + self.save_config_file() self.log.info(f'ArcFlow process completed (PID: {self.pid}). Elapsed time: {time.strftime("%H:%M:%S", time.gmtime(int(time.time()) - self.start_time))}.') + + def main(): parser = argparse.ArgumentParser(description='ArcFlow') @@ -781,14 +1161,38 @@ def main(): '--traject-extra-config', default='', help='Path to extra Traject configuration file',) + parser.add_argument( + '--agents-only', + action='store_true', + help='Process only agent records, skip collections (for testing)',) + parser.add_argument( + '--collections-only', + action='store_true', + help='Process only repositories and collections, skip creator processing',) + parser.add_argument( + '--arcuit-dir', + default=None, + help='Path to arcuit repository (for traject config). 
If not provided, will try bundle show arcuit.',) + parser.add_argument( + '--skip-creator-indexing', + action='store_true', + help='Generate creator XML files but skip Solr indexing (for testing)',) args = parser.parse_args() + + # Validate mutually exclusive flags + if args.agents_only and args.collections_only: + parser.error('Cannot use both --agents-only and --collections-only') arcflow = ArcFlow( arclight_dir=args.arclight_dir, aspace_dir=args.aspace_dir, solr_url=args.solr_url, traject_extra_config=args.traject_extra_config, - force_update=args.force_update) + force_update=args.force_update, + agents_only=args.agents_only, + collections_only=args.collections_only, + arcuit_dir=args.arcuit_dir, + skip_creator_indexing=args.skip_creator_indexing) arcflow.run() diff --git a/traject_config_eac_cpf.rb b/traject_config_eac_cpf.rb new file mode 100644 index 0000000..62c9a5a --- /dev/null +++ b/traject_config_eac_cpf.rb @@ -0,0 +1,275 @@ +# Traject configuration for indexing EAC-CPF creator records to Solr +# +# This config file processes EAC-CPF (Encoded Archival Context - Corporate Bodies, +# Persons, and Families) XML documents from ArchivesSpace archival_contexts endpoint. +# +# Usage: +# bundle exec traject -u $SOLR_URL -c traject_config_eac_cpf.rb /path/to/agents/*.xml +# +# The EAC-CPF XML documents are retrieved directly from ArchivesSpace via: +# /repositories/{repo_id}/archival_contexts/{agent_type}/{id}.xml + +require 'traject' +require 'traject_plus' +require 'traject_plus/macros' +require 'time' + +# Use TrajectPlus macros (provides extract_xpath and other helpers) +extend TrajectPlus::Macros + +# EAC-CPF namespace - used consistently throughout this config +EAC_NS = { 'eac' => 'urn:isbn:1-931666-33-4' } + +settings do + provide "solr.url", ENV['SOLR_URL'] || "http://localhost:8983/solr/blacklight-core" + provide "solr_writer.commit_on_close", "true" + provide "solr_writer.thread_pool", "8" + provide "solr_writer.batch_size", "100" + provide "processing_thread_pool", "4" + + # Use NokogiriReader for XML processing + provide "reader_class_name", "Traject::NokogiriReader" +end + +# Each record from reader +each_record do |record, context| + context.clipboard[:is_creator] = true +end + +# Core identity field +# CRITICAL: The 'id' field is required by Solr's schema (uniqueKey) +# Must ensure this field is never empty or indexing will fail +# +# IMPORTANT: Real EAC-CPF from ArchivesSpace has empty <control/> element! +# Cannot rely on recordId being present. Must extract from filename or generate. +to_field 'id' do |record, accumulator, context| + # Try 1: Extract from control/recordId (if present) + record_id = record.xpath('//eac:control/eac:recordId', EAC_NS).first + record_id ||= record.xpath('//control/recordId').first + + if record_id && !record_id.text.strip.empty? 
+ accumulator << record_id.text.strip + else + # Try 2: Extract from source filename (most reliable for ArchivesSpace exports) + # Filename format: creator_corporate_entities_584.xml or similar + source_file = context.source_record_id || context.input_name + if source_file + # Remove .xml extension and any path + id_from_filename = File.basename(source_file, '.xml') + # Check if it looks valid (starts with creator_ or agent_) + if id_from_filename =~ /^(creator_|agent_)/ + accumulator << id_from_filename + context.logger.info("Using filename-based ID: #{id_from_filename}") + else + # Try 3: Generate from entity type and name + entity_type = record.xpath('//eac:cpfDescription/eac:identity/eac:entityType', EAC_NS).first&.text&.strip + name_entry = record.xpath('//eac:cpfDescription/eac:identity/eac:nameEntry/eac:part', EAC_NS).first&.text&.strip + + if entity_type && name_entry + # Create stable ID from type and name + type_short = case entity_type + when 'corporateBody' then 'corporate' + when 'person' then 'person' + when 'family' then 'family' + else 'entity' + end + name_id = name_entry.gsub(/[^a-z0-9]/i, '_').downcase[0..50] # Limit length + generated_id = "creator_#{type_short}_#{name_id}" + accumulator << generated_id + context.logger.warn("Generated ID from name: #{generated_id}") + else + # Last resort: timestamp-based unique ID + fallback_id = "creator_unknown_#{Time.now.to_i}_#{rand(10000)}" + accumulator << fallback_id + context.logger.error("Using fallback ID: #{fallback_id}") + end + end + else + # No filename available, generate from name + entity_type = record.xpath('//eac:cpfDescription/eac:identity/eac:entityType', EAC_NS).first&.text&.strip + name_entry = record.xpath('//eac:cpfDescription/eac:identity/eac:nameEntry/eac:part', EAC_NS).first&.text&.strip + + if entity_type && name_entry + type_short = case entity_type + when 'corporateBody' then 'corporate' + when 'person' then 'person' + when 'family' then 'family' + else 'entity' + end + name_id = name_entry.gsub(/[^a-z0-9]/i, '_').downcase[0..50] + generated_id = "creator_#{type_short}_#{name_id}" + accumulator << generated_id + context.logger.warn("Generated ID from name: #{generated_id}") + else + # Absolute last resort + fallback_id = "creator_unknown_#{Time.now.to_i}_#{rand(10000)}" + accumulator << fallback_id + context.logger.error("Using fallback ID: #{fallback_id}") + end + end + end +end + +# Add is_creator marker field +to_field 'is_creator' do |record, accumulator| + accumulator << 'true' +end + +# Record type +to_field 'record_type' do |record, accumulator| + accumulator << 'creator' +end + +# Entity type (corporateBody, person, family) +to_field 'entity_type' do |record, accumulator| + entity = record.xpath('//eac:cpfDescription/eac:identity/eac:entityType', EAC_NS).first + accumulator << entity.text if entity +end + +# Title/name fields - using ArcLight dynamic field naming convention +# _tesim = text, stored, indexed, multiValued (for full-text search) +# _ssm = string, stored, multiValued (for display) +# _ssi = string, stored, indexed (for faceting/sorting) +to_field 'title_tesim' do |record, accumulator| + name = record.xpath('//eac:cpfDescription/eac:identity/eac:nameEntry/eac:part', EAC_NS) + accumulator << name.map(&:text).join(' ') if name.any? +end + +to_field 'title_ssm' do |record, accumulator| + name = record.xpath('//eac:cpfDescription/eac:identity/eac:nameEntry/eac:part', EAC_NS) + accumulator << name.map(&:text).join(' ') if name.any? 
+end + +to_field 'title_filing_ssi' do |record, accumulator| + name = record.xpath('//eac:cpfDescription/eac:identity/eac:nameEntry/eac:part', EAC_NS) + if name.any? + text = name.map(&:text).join(' ') + # Remove leading articles and convert to lowercase for filing + accumulator << text.gsub(/^(a|an|the)\s+/i, '').downcase + end +end + +# Dates of existence - using ArcLight standard field unitdate_ssm +# (matches what ArcLight uses for collection dates) +to_field 'unitdate_ssm' do |record, accumulator| + # Try existDates element + base_path = '//eac:cpfDescription/eac:description/eac:existDates' + dates = record.xpath("#{base_path}/eac:dateRange/eac:fromDate | #{base_path}/eac:dateRange/eac:toDate | #{base_path}/eac:date", EAC_NS) + if dates.any? + from_date = record.xpath("#{base_path}/eac:dateRange/eac:fromDate", EAC_NS).first + to_date = record.xpath("#{base_path}/eac:dateRange/eac:toDate", EAC_NS).first + + if from_date || to_date + from_text = from_date ? from_date.text : '' + to_text = to_date ? to_date.text : '' + accumulator << "#{from_text}-#{to_text}".gsub(/^-|-$/, '') + else + # Single date + dates.each { |d| accumulator << d.text } + end + end +end + +# Biographical/historical note - using ArcLight conventions +# _tesim for searchable plain text +# _tesm for searchable HTML (text, stored, multiValued but not for display) +# _ssm for section heading display +to_field 'bioghist_tesim' do |record, accumulator| + # Extract text from biogHist elements for full-text search + bioghist = record.xpath('//eac:cpfDescription/eac:description/eac:biogHist//eac:p', EAC_NS) + if bioghist.any? + text = bioghist.map(&:text).join(' ') + accumulator << text + end +end + +# Biographical/historical note - HTML +to_field 'bioghist_html_tesm' do |record, accumulator| + # Extract HTML for searchable content (matches ArcLight's bioghist_html_tesm) + bioghist = record.xpath('//eac:cpfDescription/eac:description/eac:biogHist//eac:p', EAC_NS) + if bioghist.any? + html = bioghist.map { |p| "<p>#{p.text}</p>" }.join("\n") + accumulator << html + end +end + +to_field 'bioghist_heading_ssm' do |record, accumulator| + # Extract section heading (matches ArcLight's bioghist_heading_ssm pattern) + heading = record.xpath('//eac:cpfDescription/eac:description/eac:biogHist//eac:head', EAC_NS).first + accumulator << heading.text if heading +end + +# Full-text search field +to_field 'text' do |record, accumulator| + # Title + name = record.xpath('//eac:cpfDescription/eac:identity/eac:nameEntry/eac:part', EAC_NS) + accumulator << name.map(&:text).join(' ') if name.any? + + # Bioghist + bioghist = record.xpath('//eac:cpfDescription/eac:description/eac:biogHist//eac:p', EAC_NS) + accumulator << bioghist.map(&:text).join(' ') if bioghist.any? 
+end
+
+# Related agents (from cpfRelation elements)
+to_field 'related_agents_ssim' do |record, accumulator|
+  relations = record.xpath('//eac:cpfDescription/eac:relations/eac:cpfRelation', EAC_NS)
+  relations.each do |rel|
+    # Get the related entity href/identifier
+    href = rel['href'] || rel['xlink:href']
+    relation_type = rel['cpfRelationType']
+
+    if href
+      # Store as: "uri|type" for easy parsing later
+      accumulator << "#{href}|#{relation_type}"
+    elsif relation_entry = rel.xpath('eac:relationEntry', EAC_NS).first
+      # If no href, at least store the name
+      name = relation_entry.text
+      accumulator << "#{name}|#{relation_type}" if name
+    end
+  end
+end
+
+# Related agents - just URIs (for simpler queries)
+to_field 'related_agent_uris_ssim' do |record, accumulator|
+  relations = record.xpath('//eac:cpfDescription/eac:relations/eac:cpfRelation', EAC_NS)
+  relations.each do |rel|
+    href = rel['href'] || rel['xlink:href']
+    accumulator << href if href
+  end
+end
+
+# Relationship types
+to_field 'relationship_types_ssim' do |record, accumulator|
+  relations = record.xpath('//eac:cpfDescription/eac:relations/eac:cpfRelation', EAC_NS)
+  relations.each do |rel|
+    relation_type = rel['cpfRelationType']
+    accumulator << relation_type if relation_type && !accumulator.include?(relation_type)
+  end
+end
+
+# Agent source URI (from original ArchivesSpace)
+to_field 'agent_uri' do |record, accumulator|
+  # Try to extract from control section or otherRecordId
+  other_id = record.xpath('//eac:control/eac:otherRecordId[@localType="archivesspace_uri"]', EAC_NS).first
+  if other_id
+    accumulator << other_id.text
+  end
+end
+
+# Timestamp
+to_field 'timestamp' do |record, accumulator|
+  accumulator << Time.now.utc.iso8601
+end
+
+# Document type marker
+to_field 'document_type' do |record, accumulator|
+  accumulator << 'creator'
+end
+
+# Log successful indexing
+each_record do |record, context|
+  record_id = record.xpath('//eac:control/eac:recordId', EAC_NS).first
+  if record_id
+    context.logger.info("Indexed creator: #{record_id.text}")
+  end
+end

From 50ad7667732a19059d34e91f01d69f92b544e2de Mon Sep 17 00:00:00 2001
From: Alex Dryden <adryden3@illinois.edu>
Date: Fri, 20 Feb 2026 21:00:13 -0500
Subject: [PATCH 03/10] feat: Optimize agent filtering with ArchivesSpace Solr
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Replace per-agent API calls with single Solr query for better performance:
- Query ArchivesSpace Solr to filter agents in bulk
- Exclude system users (publish=false)
- Exclude donors (linked_agent_role includes "dnr")
- Exclude software agents (agent_type="agent_software")
- Use consistent EAC namespace prefixes in XPath queries
- Refactor dates extraction for improved readability

Performance improvement: O(n) API calls → O(1) Solr query
Reduces processing time from minutes to seconds for large repositories.

Also update the README to reflect the required command line arguments.

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 README.md                                     |  63 ++++-
 arcflow/main.py                               | 407 ++++++++++++++++++++++++++--
 ...pf.rb => example_traject_config_eac_cpf.rb |   7 +-
 3 files changed, 334 insertions(+), 143 deletions(-)
 rename traject_config_eac_cpf.rb => example_traject_config_eac_cpf.rb (97%)

diff --git a/README.md b/README.md
index 6c570bf..2a42f18 100644
--- a/README.md
+++ b/README.md
@@ -14,14 +14,12 @@ pip install -r requirements.txt
 cp .archivessnake.yml.example .archivessnake.yml
 nano .archivessnake.yml  # Add your ArchivesSpace credentials
 
-# 3.
Set environment variables -export ARCLIGHT_DIR=/path/to/your/arclight-app -export ASPACE_DIR=/path/to/your/archivesspace -export SOLR_URL=http://localhost:8983/solr/blacklight-core - -# 4. Run arcflow -python -m arcflow.main - +# 3. Run arcflow +python -m arcflow.main \ + --arclight-dir /path/to/your/arclight-app \ + --aspace-dir /path/to/your/archivesspace \ + --solr-url http://localhost:8983/solr/blacklight-core \ + --aspace-solr-url http://localhost:8983/solr/archivesspace ``` --- @@ -42,14 +40,32 @@ ArcFlow now generates standalone creator documents in addition to collection rec - Link to all collections where the creator is listed - Can be searched and displayed independently in ArcLight - Are marked with `is_creator: true` to distinguish from collections -- Must be fed into a Solr instance with fields to match their specific facets (See:Configure Solr Schema below ) +- Must be fed into a Solr instance with fields to match their specific facets (See: Configure Solr Schema below) + +### Agent Filtering + +**ArcFlow automatically filters agents to include only legitimate creators** of archival materials. The following agent types are **excluded** from indexing: + +- ✗ **System users** - ArchivesSpace software users (identified by `is_user` field) +- ✗ **System-generated agents** - Auto-created for users (identified by `system_generated` field) +- ✗ **Software agents** - Excluded by not querying the `/agents/software` endpoint +- ✗ **Repository agents** - Corporate entities representing the repository itself (identified by `is_repo_agent` field) +- ✗ **Donor-only agents** - Agents with only the 'donor' role and no creator role + +**Agents are included if they meet any of these criteria:** + +- ✓ Have the **'creator' role** in linked_agent_roles +- ✓ Are **linked to published records** (and not excluded by filters above) + +This filtering ensures that only legitimate archival creators are discoverable in ArcLight, while protecting privacy and security by excluding system users and donors. ### How Creator Records Work 1. **Extraction**: `get_all_agents()` fetches all agents from ArchivesSpace -2. **Processing**: `task_agent()` generates an EAC-CPF XML document for each agent with bioghist notes -3. **Linking**: Handled via Solr using the persistent_id field (agents and collections linked through bioghist references) -4. **Indexing**: Creator XML files are indexed to Solr using `traject_config_eac_cpf.rb` +2. **Filtering**: `is_target_agent()` filters out system users, donors, and non-creator agents +3. **Processing**: `task_agent()` generates an EAC-CPF XML document for each target agent with bioghist notes +4. **Linking**: Handled via Solr using the persistent_id field (agents and collections linked through bioghist references) +5. **Indexing**: Creator XML files are indexed to Solr using `traject_config_eac_cpf.rb` ### Creator Document Format @@ -119,7 +135,22 @@ This is a **one-time setup** per Solr instance. --- -To index creator documents to Solr: +### Traject Configuration for Creator Indexing + +The `traject_config_eac_cpf.rb` file defines how EAC-CPF creator records are mapped to Solr fields. + +**Search Order**: arcflow searches for the traject config following the collection records pattern: +1. **arcuit_dir parameter** (if provided via `--arcuit-dir`) - Highest priority, most up-to-date user control +2. **arcuit gem** (via `bundle show arcuit`) - For backward compatibility when arcuit_dir not provided +3. 
**example_traject_config_eac_cpf.rb** in arcflow - Fallback for module usage without arcuit + +**Example File**: arcflow includes `example_traject_config_eac_cpf.rb` as a reference implementation. For production: +- Copy this file to your arcuit gem as `traject_config_eac_cpf.rb`, or +- Specify the location with `--arcuit-dir /path/to/arcuit` + +**Logging**: arcflow clearly logs which traject config file is being used when creator indexing runs. + +To index creator documents to Solr manually: ```bash bundle exec traject \ @@ -166,7 +197,9 @@ Optional arguments: python -m arcflow.main \ --arclight-dir /path/to/arclight \ --aspace-dir /path/to/archivesspace \ - --solr-url http://localhost:8983/solr/blacklight-core + --solr-url http://localhost:8983/solr/blacklight-core \ + --aspace-solr-url http://localhost:8983/solr/archivesspace + ``` **Process only agents (skip collections):** @@ -175,6 +208,7 @@ python -m arcflow.main \ --arclight-dir /path/to/arclight \ --aspace-dir /path/to/archivesspace \ --solr-url http://localhost:8983/solr/blacklight-core \ + --aspace-solr-url http://localhost:8983/solr/archivesspace \ --agents-only ``` @@ -184,6 +218,7 @@ python -m arcflow.main \ --arclight-dir /path/to/arclight \ --aspace-dir /path/to/archivesspace \ --solr-url http://localhost:8983/solr/blacklight-core \ + --aspace-solr-url http://localhost:8983/solr/archivesspace \ --force-update ``` diff --git a/arcflow/main.py b/arcflow/main.py index 292d049..bf9375b 100644 --- a/arcflow/main.py +++ b/arcflow/main.py @@ -16,7 +16,7 @@ from datetime import datetime, timezone from asnake.client import ASnakeClient from multiprocessing.pool import ThreadPool as Pool -from .utils.stage_classifications import extract_labels +from utils.stage_classifications import extract_labels base_dir = os.path.abspath((__file__) + "/../../") @@ -40,8 +40,9 @@ class ArcFlow: """ - def __init__(self, arclight_dir, aspace_dir, solr_url, traject_extra_config='', force_update=False, agents_only=False, collections_only=False, arcuit_dir=None, skip_creator_indexing=False): + def __init__(self, arclight_dir, aspace_dir, solr_url, aspace_solr_url, traject_extra_config='', force_update=False, agents_only=False, collections_only=False, arcuit_dir=None, skip_creator_indexing=False): self.solr_url = solr_url + self.aspace_solr_url = aspace_solr_url self.batch_size = 1000 clean_extra_config = traject_extra_config.strip() self.traject_extra_config = clean_extra_config or None @@ -217,6 +218,9 @@ def task_resource(self, repo, resource_id, xml_dir, pdf_dir, indent_size=0): 'resolve': ['classifications', 'classification_terms', 'linked_agents'], }).json() + if "ead_id" not in resource: + self.log.error(f'{indent}Resource {resource_id} is missing an ead_id. Skipping.') + return pdf_job xml_file_path = f'{xml_dir}/{resource["ead_id"]}.xml' # replace dots with dashes in EAD ID to avoid issues with Solr @@ -399,6 +403,7 @@ def update_eads(self): ArchivesSpace. 
""" xml_dir = f'{self.arclight_dir}/public/xml' + resource_dir = f'{xml_dir}/resources' pdf_dir = f'{self.arclight_dir}/public/pdf' modified_since = int(self.last_updated.timestamp()) @@ -412,7 +417,7 @@ def update_eads(self): json={'delete': {'query': '*:*'}}, ) if response.status_code == 200: - self.log.info('Deleted all EADs from ArcLight Solr.') + self.log.info('Deleted all EADs and Creators from ArcLight Solr.') # delete related directories after suscessful # deletion from solr for dir_path, dir_name in [(xml_dir, 'XMLs'), (pdf_dir, 'PDFs')]: @@ -424,10 +429,10 @@ def update_eads(self): else: self.log.error(f'Failed to delete all EADs from Arclight Solr. Status code: {response.status_code}') except requests.exceptions.RequestException as e: - self.log.error(f'Error deleting all EADs from ArcLight Solr: {e}') + self.log.error(f'Error deleting all EADs and Creators from ArcLight Solr: {e}') # create directories if don't exist - for dir_path in (xml_dir, pdf_dir): + for dir_path in (resource_dir, pdf_dir): os.makedirs(dir_path, exist_ok=True) # process resources that have been modified in ArchivesSpace since last update @@ -440,7 +445,7 @@ def update_eads(self): # Tasks for processing repositories results_1 = [pool.apply_async( self.task_repository, - args=(repo, xml_dir, modified_since, indent_size)) + args=(repo, resource_dir, modified_since, indent_size)) for repo in repos] # Collect outputs from repository tasks outputs_1 = [r.get() for r in results_1] @@ -448,7 +453,7 @@ def update_eads(self): # Tasks for processing resources results_2 = [pool.apply_async( self.task_resource, - args=(repo, resource_id, xml_dir, pdf_dir, indent_size)) + args=(repo, resource_id, resource_dir, pdf_dir, indent_size)) for repo, resources in outputs_1 for resource_id in resources] # Collect outputs from resource tasks outputs_2 = [r.get() for r in results_2] @@ -463,7 +468,7 @@ def update_eads(self): # Tasks for indexing pending resources results_3 = [pool.apply_async( self.index_collections, - args=(repo_id, f'{xml_dir}/{repo_id}_*_batch_{batch_num}.xml', indent_size)) + args=(repo_id, f'{resource_dir}/{repo_id}_*_batch_{batch_num}.xml', indent_size)) for repo_id, batch_num in batches] # Wait for indexing tasks to complete @@ -472,7 +477,7 @@ def update_eads(self): # Remove pending symlinks after indexing for repo_id, batch_num in batches: - xml_file_path = f'{xml_dir}/{repo_id}_*_batch_{batch_num}.xml' + xml_file_path = f'{resource_dir}/{repo_id}_*_batch_{batch_num}.xml' try: result = subprocess.run( f'rm {xml_file_path}', @@ -495,14 +500,23 @@ def update_eads(self): for r in results_4: r.get() - # processing deleted resources is not needed when - # force-update is set or modified_since is set to 0 - if self.force_update or modified_since <= 0: - self.log.info('Skipping deleted resources processing.') - return + return + + + + def process_deleted_records(self): + + xml_dir = f'{self.arclight_dir}/public/xml' + resource_dir = f'{xml_dir}/resources' + agent_dir = f'{xml_dir}/agents' + pdf_dir = f'{self.arclight_dir}/public/pdf' + modified_since = int(self.last_updated.timestamp()) + + # process records that have been deleted since last update in ArchivesSpace + resource_pattern = r'^/repositories/(?P<repo_id>\d+)/resources/(?P<record_id>\d+)$' + agent_pattern = r'^/agents/(?P<agent_type>people|corporate_entities|families)/(?P<record_id>\d+)$' + - # process resources that have been deleted since last update in ArchivesSpace - pattern = r'^/repositories/(?P<repo_id>\d+)/resources/(?P<resource_id>\d+)$' page 
= 1 while True: deleted_records = self.client.get( @@ -513,12 +527,13 @@ def update_eads(self): } ).json() for record in deleted_records['results']: - match = re.match(pattern, record) - if match: - resource_id = match.group('resource_id') + resource_match = re.match(resource_pattern, record) + agent_match = re.match(agent_pattern, record) + if resource_match and not self.agents_only: + resource_id = resource_match.group('resource_id') self.log.info(f'{" " * indent_size}Processing deleted resource ID {resource_id}...') - symlink_path = f'{xml_dir}/{resource_id}.xml' + symlink_path = f'{resource_dir}/{resource_id}.xml' ead_id = self.get_ead_from_symlink(symlink_path) if ead_id: self.delete_ead( @@ -530,6 +545,14 @@ def update_eads(self): else: self.log.error(f'{" " * (indent_size+2)}Symlink {symlink_path} not found. Unable to delete the associated EAD from Arclight Solr.') + if agent_match and not self.collections_only: + agent_id = agent_match.group('agent_id') + self.log.info(f'{" " * indent_size}Processing deleted agent ID {agent_id}...') + file_path = f'{agent_dir}/{agent_id}.xml' + agent_solr_id = f'creator_{agent_type}_{agent_id}' + self.delete_creator(file_path, agent_solr_id, indent_size) + + if deleted_records['last_page'] == page: break page += 1 @@ -577,9 +600,9 @@ def index_collections(self, repo_id, xml_file_path, indent_size=0): env = os.environ.copy() env['REPOSITORY_ID'] = str(repo_id) - + cmd_string = ' '.join(cmd) result = subprocess.run( - cmd, + cmd_string, cwd=self.arclight_dir, env=env, stderr=subprocess.PIPE, @@ -673,63 +696,142 @@ def get_creator_bioghist(self, resource, indent_size=0): return '\n'.join(bioghist_elements) return None - - def get_all_agents(self, agent_types=None, modified_since=0, indent_size=0): + def _get_target_agent_criteria(self, modified_since=0): """ - Fetch ALL agents from ArchivesSpace (not just creators). - Uses direct agent API endpoints for comprehensive coverage. - + Defines the Solr query criteria for "target" agents. + These are agents we want to process. + """ + # Basic filters for agents to include + criteria = [ + "system_generated:false", + "is_user:false", + "is_repo_agent:false", + # Include agents that are creators OR are linked to published records + "(linked_agent_roles:creator OR is_linked_to_published_record:true)", + # Exclude agents whose ONLY role is 'donor' + # This logic says: "NOT (role is only donor)" + "(*:* -linked_agent_roles:donor OR (*:* AND linked_agent_roles:[* TO *] AND (*:* -linked_agent_roles:donor)))" + ] + + # Add time filter if applicable + if modified_since > 0 and not self.force_update: + mtime_utc = datetime.fromtimestamp(modified_since, tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ') + criteria.append(f"system_mtime:[{mtime_utc} TO *]") + + return criteria + + def _get_nontarget_agent_criteria(self, modified_since=0): + """ + Defines the Solr query criteria for "non-target" (excluded) agents. + This is the logical inverse of the target criteria. 
+ """ + # The core logic for what makes an agent a "target" + target_logic = " AND ".join([ + "system_generated:false", + "is_user:false", + "is_repo_agent:false", + "(linked_agent_roles:creator OR is_linked_to_published_record:true)", + "(*:* -linked_agent_roles:donor OR (*:* AND linked_agent_roles:[* TO *] AND (*:* -linked_agent_roles:donor)))" + ]) + + # We find non-targets by negating the entire block of target logic + criteria = [f"NOT ({target_logic})"] + + # We still apply the time filter to the overall query + if modified_since > 0 and not self.force_update: + mtime_utc = datetime.fromtimestamp(modified_since, tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ') + criteria.append(f"system_mtime:[{mtime_utc} TO *]") + + return criteria + + def _execute_solr_query(self, query_parts, solr_url=None, fields=['id'], indent_size=0): + """ + A generic function to execute a query against the Solr index. + Args: - agent_types: List of agent types to fetch. Default: ['corporate_entities', 'people', 'families'] - modified_since: Unix timestamp to filter agents modified since this time (if API supports it) - indent_size: Indentation size for logging - + query_parts (list): A list of strings that will be joined with " AND ". + fields (list): A list of Solr fields to return in the response. + Returns: - set: Set of agent URIs (e.g., '/agents/corporate_entities/123') + list: A list of dictionaries, where each dictionary contains the requested fields. + Returns an empty list on failure. + """ + indent = ' ' * indent_size + if not query_parts: + self.log.error("Cannot execute Solr query with empty criteria.") + return [] + + if not solr_url: + solr_url = self.solr_url + + query_string = " AND ".join(query_parts) + self.log.info(f"{indent}Executing Solr query: {query_string}") + + try: + # First, get the total count of matching documents + count_params = {'q': query_string, 'rows': 0, 'wt': 'json'} + count_response = requests.get(f'{solr_url}/select', params=count_params) + self.log.info(f" [Solr Count Request]: {count_response.request.url}") + + count_response.raise_for_status() + num_found = count_response.json()['response']['numFound'] + + if num_found == 0: + return [] # No need to query again if nothing was found + + # Now, fetch the actual data for the documents + data_params = { + 'q': query_string, + 'rows': num_found, # Use the exact count to fetch all results + 'fl': ','.join(fields), # Join field list into a comma-separated string + 'wt': 'json' + } + response = requests.get(f'{solr_url}/select', params=data_params) + response.raise_for_status() + # Log the exact URL for the data request + self.log.info(f" [Solr Data Request]: {response.request.url}") + + return response.json()['response']['docs'] + + except requests.exceptions.RequestException as e: + self.log.error(f"Failed to execute Solr query: {e}") + self.log.error(f" Failed query string: {query_string}") + return [] + + def get_all_agents(self, agent_types=None, modified_since=0, indent_size=0): + """ + Fetch target agent URIs from the Solr index and log non-target agents. 
""" if agent_types is None: - agent_types = ['corporate_entities', 'people', 'families'] - + agent_types = ['agent_person', 'agent_corporate_entity', 'agent_family'] + + if self.force_update: + modified_since = 0 indent = ' ' * indent_size - all_agents = set() - - self.log.info(f'{indent}Fetching ALL agents from ArchivesSpace...') - - for agent_type in agent_types: - try: - # Try with modified_since parameter first - params = {'all_ids': True} - if modified_since > 0: - params['modified_since'] = modified_since - - response = self.client.get(f'/agents/{agent_type}', params=params) - agent_ids = response.json() - - self.log.info(f'{indent}Found {len(agent_ids)} {agent_type} agents') - - # Add agent URIs to set - for agent_id in agent_ids: - agent_uri = f'/agents/{agent_type}/{agent_id}' - all_agents.add(agent_uri) - - except Exception as e: - self.log.error(f'{indent}Error fetching {agent_type} agents: {e}') - # If modified_since fails, try without it - if modified_since > 0: - self.log.warning(f'{indent}Retrying {agent_type} without modified_since filter...') - try: - response = self.client.get(f'/agents/{agent_type}', params={'all_ids': True}) - agent_ids = response.json() - self.log.info(f'{indent}Found {len(agent_ids)} {agent_type} agents (no date filter)') - for agent_id in agent_ids: - agent_uri = f'/agents/{agent_type}/{agent_id}' - all_agents.add(agent_uri) - except Exception as e2: - self.log.error(f'{indent}Failed to fetch {agent_type} agents: {e2}') - - self.log.info(f'{indent}Found {len(all_agents)} total agents across all types.') - return all_agents + self.log.info(f'{indent}Fetching agent data from Solr...') + + # Base criteria for all queries in this function + base_criteria = [f"primary_type:({' OR '.join(agent_types)})"] + + # Get and log the non-target agents + nontarget_criteria = base_criteria + self._get_nontarget_agent_criteria(modified_since) + excluded_docs = self._execute_solr_query(nontarget_criteria,self.aspace_solr_url, fields=['id']) + if excluded_docs: + excluded_ids = [doc['id'] for doc in excluded_docs] + self.log.info(f"{indent} Found {len(excluded_ids)} non-target (excluded) agents.") + # Optional: Log the actual IDs if the list isn't too long + # for agent_id in excluded_ids: + # self.log.debug(f"{indent} - Excluded: {agent_id}") + + # Get and return the target agents + target_criteria = base_criteria + self._get_target_agent_criteria(modified_since) + self.log.info('Target Criteria:') + target_docs = self._execute_solr_query(target_criteria, self.aspace_solr_url, fields=['id']) + + target_agents = [doc['id'] for doc in target_docs] + self.log.info(f"{indent} Found {len(target_agents)} target agents to process.") + return target_agents def task_agent(self, agent_uri, agents_dir, repo_id=1, indent_size=0): """ @@ -844,14 +946,15 @@ def process_creators(self): self.log.info(f'{indent}Indexing {len(creator_ids)} creator records to Solr...') traject_config = self.find_traject_config() if traject_config: + self.log.info(f'{indent}Using traject config: {traject_config}') indexed = self.index_creators(agents_dir, creator_ids) self.log.info(f'{indent}Creator indexing complete: {indexed}/{len(creator_ids)} indexed') else: - self.log.info(f'{indent}Skipping creator indexing (traject config not found)') + self.log.warning(f'{indent}Skipping creator indexing (traject config not found)') self.log.info(f'{indent}To index manually:') self.log.info(f'{indent} cd {self.arclight_dir}') self.log.info(f'{indent} bundle exec traject -u {self.solr_url} -i xml \\') - 
self.log.info(f'{indent} -c /path/to/arcuit/arcflow/traject_config_eac_cpf.rb \\') + self.log.info(f'{indent} -c /path/to/arcuit-gem/traject_config_eac_cpf.rb \\') self.log.info(f'{indent} {agents_dir}/*.xml') elif self.skip_creator_indexing: self.log.info(f'{indent}Skipping creator indexing (--skip-creator-indexing flag set)') @@ -863,15 +966,32 @@ def find_traject_config(self): """ Find the traject config for creator indexing. - Tries: - 1. bundle show arcuit (finds installed gem) - 2. self.arcuit_dir (explicit path) - 3. Returns None if neither works + Search order (follows collection records pattern): + 1. arcuit_dir if provided (most up-to-date user control) + 2. arcuit gem via bundle show (for backward compatibility) + 3. example_traject_config_eac_cpf.rb in arcflow (fallback when used as module without arcuit) Returns: str: Path to traject config, or None if not found """ - # Try bundle show arcuit first + self.log.info('Searching for traject_config_eac_cpf.rb...') + searched_paths = [] + + # Try 1: arcuit_dir if provided (highest priority - user's explicit choice) + if self.arcuit_dir: + self.log.debug(f' Checking arcuit_dir parameter: {self.arcuit_dir}') + candidate_paths = [ + os.path.join(self.arcuit_dir, 'traject_config_eac_cpf.rb'), + os.path.join(self.arcuit_dir, 'lib', 'arcuit', 'traject', 'traject_config_eac_cpf.rb'), + ] + searched_paths.extend(candidate_paths) + for traject_config in candidate_paths: + if os.path.exists(traject_config): + self.log.info(f'✓ Using traject config from arcuit_dir: {traject_config}') + return traject_config + self.log.debug(' traject_config_eac_cpf.rb not found in arcuit_dir') + + # Try 2: bundle show arcuit (for backward compatibility when arcuit_dir not provided) try: result = subprocess.run( ['bundle', 'show', 'arcuit'], @@ -882,39 +1002,46 @@ def find_traject_config(self): ) if result.returncode == 0: arcuit_path = result.stdout.strip() - # Prefer config at gem root, fall back to legacy subdirectory layout + self.log.debug(f' Found arcuit gem at: {arcuit_path}') candidate_paths = [ os.path.join(arcuit_path, 'traject_config_eac_cpf.rb'), - os.path.join(arcuit_path, 'arcflow', 'traject_config_eac_cpf.rb'), + os.path.join(arcuit_path, 'lib', 'arcuit', 'traject', 'traject_config_eac_cpf.rb'), ] + searched_paths.extend(candidate_paths) for traject_config in candidate_paths: if os.path.exists(traject_config): - self.log.info(f'Found traject config via bundle show: {traject_config}') + self.log.info(f'✓ Using traject config from arcuit gem: {traject_config}') return traject_config - self.log.warning( - 'bundle show arcuit succeeded but traject_config_eac_cpf.rb ' - 'was not found in any expected location under the gem root' + self.log.debug( + ' traject_config_eac_cpf.rb not found in arcuit gem ' + '(checked root and lib/arcuit/traject/ subdirectory)' ) else: - self.log.debug('bundle show arcuit failed (gem not installed?)') + self.log.debug(' arcuit gem not found via bundle show') except Exception as e: - self.log.debug(f'Error running bundle show arcuit: {e}') - # Fall back to arcuit_dir if provided - if self.arcuit_dir: - candidate_paths = [ - os.path.join(self.arcuit_dir, 'traject_config_eac_cpf.rb'), - os.path.join(self.arcuit_dir, 'arcflow', 'traject_config_eac_cpf.rb'), - ] - for traject_config in candidate_paths: - if os.path.exists(traject_config): - self.log.info(f'Using traject config from arcuit_dir: {traject_config}') - return traject_config - self.log.warning( - 'arcuit_dir provided but traject_config_eac_cpf.rb was not found ' - 
'in any expected location'
+            self.log.debug(f'  Error checking for arcuit gem: {e}')
+
+        # Try 3: example file in arcflow package (fallback for module usage without arcuit)
+        # We know exactly where this file is located - at the repo root
+        arcflow_package_dir = os.path.dirname(os.path.abspath(__file__))
+        arcflow_repo_root = os.path.dirname(arcflow_package_dir)
+        traject_config = os.path.join(arcflow_repo_root, 'example_traject_config_eac_cpf.rb')
+        searched_paths.append(traject_config)
+
+        if os.path.exists(traject_config):
+            self.log.info(f'✓ Using example traject config from arcflow: {traject_config}')
+            self.log.info(
+                '  Note: Using example config. For production, copy this file to your '
+                'arcuit gem or specify location with --arcuit-dir.'
             )
-        # No config found
-        self.log.warning('Could not find traject config (bundle show arcuit failed and arcuit_dir not provided or invalid)')
+            return traject_config
+
+        # No config found anywhere - show all paths searched
+        self.log.error('✗ Could not find traject_config_eac_cpf.rb in any of these locations:')
+        for i, path in enumerate(searched_paths, 1):
+            self.log.error(f'  {i}. {path}')
+        self.log.error('')
+        self.log.error('  Add traject_config_eac_cpf.rb to your arcuit gem or specify with --arcuit-dir.')
         return None
 
 
@@ -1068,37 +1195,51 @@ def create_symlink(self, target_path, symlink_path, indent_size=0):
             self.log.info(f'{indent}{e}')
             return False
 
-
-    def delete_ead(self, resource_id, ead_id,
-                   xml_file_path, pdf_file_path, indent_size=0):
+    def delete_arclight_solr_record(self, solr_record_id, indent_size=0):
         indent = ' ' * indent_size
-        # delete from solr
+
         try:
             response = requests.post(
                 f'{self.solr_url}/update?commit=true',
-                json={'delete': {'id': ead_id}},
+                json={'delete': {'id': solr_record_id}},
             )
             if response.status_code == 200:
-                self.log.info(f'{indent}Deleted EAD "{ead_id}" from ArcLight Solr.')
-                # delete related files after suscessful deletion from solr
-                for file_path in (xml_file_path, pdf_file_path):
-                    try:
-                        os.remove(file_path)
-                        self.log.info(f'{indent}Deleted file {file_path}.')
-                    except FileNotFoundError:
-                        self.log.error(f'{indent}File {file_path} not found.')
-
-                # delete symlink if exists
-                symlink_path = f'{os.path.dirname(xml_file_path)}/{resource_id}.xml'
-                try:
-                    os.remove(symlink_path)
-                    self.log.info(f'{indent}Deleted symlink {symlink_path}.')
-                except FileNotFoundError:
-                    self.log.info(f'{indent}Symlink {symlink_path} not found.')
+                self.log.info(f'{indent}Deleted Solr record {solr_record_id} from ArcLight Solr.')
+                return True
             else:
-                self.log.error(f'{indent}Failed to delete EAD "{ead_id}" from Arclight Solr. Status code: {response.status_code}')
+                self.log.error(
+                    f'{indent}Failed to delete Solr record {solr_record_id} from ArcLight Solr. 
Status code: {response.status_code}') + return False except requests.exceptions.RequestException as e: - self.log.error(f'{indent}Error deleting EAD "{ead_id}" from ArcLight Solr: {e}') + self.log.error(f'{indent}Error deleting Solr record {solr_record_id} from ArcLight Solr: {e}') + + def delete_file(self, file_path, indent_side=0): + indent = ' ' * indent_size + + try: + os.remove(file_path) + self.log.info(f'{indent}Deleted file {file_path}.') + except FileNotFoundError: + self.log.error(f'{indent}File {file_path} not found.') + + def delete_ead(self, resource_id, ead_id, + xml_file_path, pdf_file_path, indent_size=0): + indent = ' ' * indent_size + # delete from solr + deleted_solr_record = self.delete_arclight_solr_record(ead_id, indent_size=indent_size) + if deleted_solr_record: + self.delete_file(pdf_file_path, indent=indent) + self.delete_file(xml_file_path, indent=indent) + # delete symlink if exists + symlink_path = f'{os.path.dirname(xml_file_path)}/{resource_id}.xml' + self.delete_file(symlink_path, indent=indent) + + def delete_creator(self, file_path, solr_id, indent_size=0): + indent = ' ' * indent_size + deleted_solr_record = self.delete_arclight_solr_record(solr_id, indent_size=indent_size) + if deleted_solr_record: + self.delete_file(file_path, indent=indent) + def save_config_file(self): @@ -1132,7 +1273,14 @@ def run(self): # Update creator records (unless collections-only mode) if not self.collections_only: self.process_creators() - + + # processing deleted resources is not needed when + # force-update is set or modified_since is set to 0 + if self.force_update or int(self.last_updated.timestamp()) <= 0: + self.log.info('Skipping deleted record processing.') + else: + self.process_deleted_records() + self.save_config_file() self.log.info(f'ArcFlow process completed (PID: {self.pid}). Elapsed time: {time.strftime("%H:%M:%S", time.gmtime(int(time.time()) - self.start_time))}.') @@ -1156,7 +1304,11 @@ def main(): parser.add_argument( '--solr-url', required=True, - help='URL of the Solr core',) + help='URL of the ArcLight Solr core',) + parser.add_argument( + '--aspace-solr-url', + required=True, + help='URL of the ASpace Solr core',) parser.add_argument( '--traject-extra-config', default='', @@ -1187,6 +1339,7 @@ def main(): arclight_dir=args.arclight_dir, aspace_dir=args.aspace_dir, solr_url=args.solr_url, + aspace_solr_url=args.aspace_solr_url, traject_extra_config=args.traject_extra_config, force_update=args.force_update, agents_only=args.agents_only, diff --git a/traject_config_eac_cpf.rb b/example_traject_config_eac_cpf.rb similarity index 97% rename from traject_config_eac_cpf.rb rename to example_traject_config_eac_cpf.rb index 62c9a5a..6234ccd 100644 --- a/traject_config_eac_cpf.rb +++ b/example_traject_config_eac_cpf.rb @@ -4,7 +4,9 @@ # Persons, and Families) XML documents from ArchivesSpace archival_contexts endpoint. # # Usage: -# bundle exec traject -u $SOLR_URL -c traject_config_eac_cpf.rb /path/to/agents/*.xml +# bundle exec traject -u $SOLR_URL -c example_traject_config_eac_cpf.rb /path/to/agents/*.xml +# +# For production, copy this file to your arcuit gem as traject_config_eac_cpf.rb # # The EAC-CPF XML documents are retrieved directly from ArchivesSpace via: # /repositories/{repo_id}/archival_contexts/{agent_type}/{id}.xml @@ -188,7 +190,8 @@ # Extract HTML for searchable content (matches ArcLight's bioghist_html_tesm) bioghist = record.xpath('//eac:cpfDescription/eac:description/eac:biogHist//eac:p', EAC_NS) if bioghist.any? 
-    html = bioghist.map { |p| "<p>#{p.text}</p>" }.join("\n")
+    # Preserve inline EAC markup inside <eac:p> by serializing child nodes
+    html = bioghist.map { |p| "<p>#{p.inner_html}</p>" }.join("\n")
     accumulator << html
   end
 end

From 7b9522a2e972b97112d4dbd821931e1204e23000 Mon Sep 17 00:00:00 2001
From: Copilot <198982749+Copilot@users.noreply.github.com>
Date: Thu, 26 Feb 2026 11:39:16 -0500
Subject: [PATCH 04/10] fix: always use filename for id

Log and skip the record if the filename does not match the expected
pattern: creator_{type}_{id}
---
 example_traject_config_eac_cpf.rb | 83 ++++++-------------------------
 1 file changed, 15 insertions(+), 68 deletions(-)

diff --git a/example_traject_config_eac_cpf.rb b/example_traject_config_eac_cpf.rb
index 6234ccd..be544e9 100644
--- a/example_traject_config_eac_cpf.rb
+++ b/example_traject_config_eac_cpf.rb
@@ -22,6 +22,9 @@
 # EAC-CPF namespace - used consistently throughout this config
 EAC_NS = { 'eac' => 'urn:isbn:1-931666-33-4' }
 
+# Pattern matching arcflow's creator file naming: creator_{entity_type}_{id}
+CREATOR_ID_PATTERN = /^creator_(corporate_entities|people|families)_\d+$/
+
 settings do
   provide "solr.url", ENV['SOLR_URL'] || "http://localhost:8983/solr/blacklight-core"
   provide "solr_writer.commit_on_close", "true"
@@ -38,77 +41,21 @@
   context.clipboard[:is_creator] = true
 end
 
-# Core identity field
-# CRITICAL: The 'id' field is required by Solr's schema (uniqueKey)
-# Must ensure this field is never empty or indexing will fail
-#
-# IMPORTANT: Real EAC-CPF from ArchivesSpace has empty <control/> element!
-# Cannot rely on recordId being present. Must extract from filename or generate.
+# Solr uniqueKey - extract ID from filename using arcflow's creator_{entity_type}_{id} pattern
 to_field 'id' do |record, accumulator, context|
-  # Try 1: Extract from control/recordId (if present)
-  record_id = record.xpath('//eac:control/eac:recordId', EAC_NS).first
-  record_id ||= record.xpath('//control/recordId').first
-
-  if record_id && !record_id.text.strip.empty?
- accumulator << record_id.text.strip - else - # Try 2: Extract from source filename (most reliable for ArchivesSpace exports) - # Filename format: creator_corporate_entities_584.xml or similar - source_file = context.source_record_id || context.input_name - if source_file - # Remove .xml extension and any path - id_from_filename = File.basename(source_file, '.xml') - # Check if it looks valid (starts with creator_ or agent_) - if id_from_filename =~ /^(creator_|agent_)/ - accumulator << id_from_filename - context.logger.info("Using filename-based ID: #{id_from_filename}") - else - # Try 3: Generate from entity type and name - entity_type = record.xpath('//eac:cpfDescription/eac:identity/eac:entityType', EAC_NS).first&.text&.strip - name_entry = record.xpath('//eac:cpfDescription/eac:identity/eac:nameEntry/eac:part', EAC_NS).first&.text&.strip - - if entity_type && name_entry - # Create stable ID from type and name - type_short = case entity_type - when 'corporateBody' then 'corporate' - when 'person' then 'person' - when 'family' then 'family' - else 'entity' - end - name_id = name_entry.gsub(/[^a-z0-9]/i, '_').downcase[0..50] # Limit length - generated_id = "creator_#{type_short}_#{name_id}" - accumulator << generated_id - context.logger.warn("Generated ID from name: #{generated_id}") - else - # Last resort: timestamp-based unique ID - fallback_id = "creator_unknown_#{Time.now.to_i}_#{rand(10000)}" - accumulator << fallback_id - context.logger.error("Using fallback ID: #{fallback_id}") - end - end + source_file = context.source_record_id || context.input_name + if source_file + id_from_filename = File.basename(source_file, '.xml') + if id_from_filename =~ CREATOR_ID_PATTERN + accumulator << id_from_filename + context.logger.info("Using filename-based ID: #{id_from_filename}") else - # No filename available, generate from name - entity_type = record.xpath('//eac:cpfDescription/eac:identity/eac:entityType', EAC_NS).first&.text&.strip - name_entry = record.xpath('//eac:cpfDescription/eac:identity/eac:nameEntry/eac:part', EAC_NS).first&.text&.strip - - if entity_type && name_entry - type_short = case entity_type - when 'corporateBody' then 'corporate' - when 'person' then 'person' - when 'family' then 'family' - else 'entity' - end - name_id = name_entry.gsub(/[^a-z0-9]/i, '_').downcase[0..50] - generated_id = "creator_#{type_short}_#{name_id}" - accumulator << generated_id - context.logger.warn("Generated ID from name: #{generated_id}") - else - # Absolute last resort - fallback_id = "creator_unknown_#{Time.now.to_i}_#{rand(10000)}" - accumulator << fallback_id - context.logger.error("Using fallback ID: #{fallback_id}") - end + context.logger.error("Filename doesn't match expected pattern 'creator_{type}_{id}': #{id_from_filename}") + context.skip!("Invalid ID format in filename") end + else + context.logger.error("No source filename available for record") + context.skip!("Missing source filename") end end From 635af2be2cf2339e8318e9ff28a12606b94c8f32 Mon Sep 17 00:00:00 2001 From: Alex Dryden <adryden3@illinois.edu> Date: Mon, 2 Mar 2026 16:18:47 -0500 Subject: [PATCH 05/10] fix: reduce duplicate fields and make fields dynamic --- example_traject_config_eac_cpf.rb | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/example_traject_config_eac_cpf.rb b/example_traject_config_eac_cpf.rb index be544e9..be0d297 100644 --- a/example_traject_config_eac_cpf.rb +++ b/example_traject_config_eac_cpf.rb @@ -64,13 +64,13 @@ accumulator << 'true' end -# Record type 
-to_field 'record_type' do |record, accumulator|
-  accumulator << 'creator'
-end
+# # Record type
+# to_field 'record_type' do |record, accumulator|
+#   accumulator << 'creator'
+# end
 
 # Entity type (corporateBody, person, family)
-to_field 'entity_type' do |record, accumulator|
+to_field 'entity_type_ssi' do |record, accumulator|
   entity = record.xpath('//eac:cpfDescription/eac:identity/eac:entityType', EAC_NS).first
   accumulator << entity.text if entity
 end
@@ -198,7 +198,7 @@
 end
 
 # Agent source URI (from original ArchivesSpace)
-to_field 'agent_uri' do |record, accumulator|
+to_field 'agent_uri_ssi' do |record, accumulator|
   # Try to extract from control section or otherRecordId
   other_id = record.xpath('//eac:control/eac:otherRecordId[@localType="archivesspace_uri"]', EAC_NS).first
   if other_id
@@ -211,10 +211,10 @@
   accumulator << Time.now.utc.iso8601
 end
 
-# Document type marker
-to_field 'document_type' do |record, accumulator|
-  accumulator << 'creator'
-end
+# # Document type marker
+# to_field 'document_type' do |record, accumulator|
+#   accumulator << 'creator'
+# end
 
 # Log successful indexing
 each_record do |record, context|

From 24d86a6fc0002341084e0a1d71cda2f1dccdef41 Mon Sep 17 00:00:00 2001
From: Alex Dryden <adryden3@illinois.edu>
Date: Mon, 2 Mar 2026 17:30:45 -0500
Subject: [PATCH 06/10] feat: store related agent ids, uris, and relationships
 in arrays

---
 example_traject_config_eac_cpf.rb | 74 ++++++++++++++++++++++++++-----
 1 file changed, 62 insertions(+), 12 deletions(-)

diff --git a/example_traject_config_eac_cpf.rb b/example_traject_config_eac_cpf.rb
index be0d297..52d0050 100644
--- a/example_traject_config_eac_cpf.rb
+++ b/example_traject_config_eac_cpf.rb
@@ -25,6 +25,9 @@
 # Pattern matching arcflow's creator file naming: creator_{entity_type}_{id}
 CREATOR_ID_PATTERN = /^creator_(corporate_entities|people|families)_\d+$/
 
+# Entity types - SINGLE SOURCE OF TRUTH
+ENTITY_TYPES = ['corporate_entities', 'people', 'families']
+
 settings do
   provide "solr.url", ENV['SOLR_URL'] || "http://localhost:8983/solr/blacklight-core"
   provide "solr_writer.commit_on_close", "true"
@@ -160,26 +163,25 @@
   accumulator << bioghist.map(&:text).join(' ') if bioghist.any?
 end
 
-# Related agents (from cpfRelation elements)
-to_field 'related_agents_ssim' do |record, accumulator|
+# Related agents (from cpfRelation elements) for display parsing and debugging, stored as a single line
+# "https://archivesspace-stage.library.illinois.edu/agents/corporate_entities/57|associative"
+to_field 'related_agents_debug_ssim' do |record, accumulator|
   relations = record.xpath('//eac:cpfDescription/eac:relations/eac:cpfRelation', EAC_NS)
   relations.each do |rel|
-    # Get the related entity href/identifier
     href = rel['href'] || rel['xlink:href']
     relation_type = rel['cpfRelationType']
-
+
     if href
-      # Store as: "uri|type" for easy parsing later
-      accumulator << "#{href}|#{relation_type}"
-    elsif relation_entry = rel.xpath('eac:relationEntry', EAC_NS).first
-      # If no href, at least store the name
-      name = relation_entry.text
-      accumulator << "#{name}|#{relation_type}" if name
+      solr_id = aspace_uri_to_solr_id(href)
+      if solr_id
+        # Format: "solr_id|type"
+        accumulator << "#{solr_id}|#{relation_type || 'unknown'}"
+      end
     end
   end
 end
 
-# Related agents - just URIs (for simpler queries)
+# Related agents - ASpace URIs, in parallel array to match ids and types
 to_field 'related_agent_uris_ssim' do |record, accumulator|
   relations = record.xpath('//eac:cpfDescription/eac:relations/eac:cpfRelation', EAC_NS)
   relations.each do |rel|
@@ -188,7 +190,31 @@
   end
 end
 
-# Relationship types
+# Related agents - Parallel array of relationship ids to match relationship types and uris
+to_field 'related_agent_ids_ssim' do |record, accumulator|
+  relations = record.xpath('//eac:cpfDescription/eac:relations/eac:cpfRelation', EAC_NS)
+  relations.each do |rel|
+    href = rel['href'] || rel['xlink:href']
+    if href
+      solr_id = aspace_uri_to_solr_id(href)  # convert URI to Solr ID
+      accumulator << solr_id if solr_id
+    end
+  end
+end
+
+# Related Agents - Parallel array of relationship types to match relationship ids and uris
+to_field 'related_agent_relationship_types_ssim' do |record, accumulator|
+  relations = record.xpath('//eac:cpfDescription/eac:relations/eac:cpfRelation', EAC_NS)
+  relations.each do |rel|
+    href = rel['href'] || rel['xlink:href']
+    if href
+      relation_type = rel['cpfRelationType'] || 'unknown'
+      accumulator << relation_type  # no deduplication - keeps array parallel
+    end
+  end
+end
+
+# Relationship types, used for faceting
 to_field 'relationship_types_ssim' do |record, accumulator|
   relations = record.xpath('//eac:cpfDescription/eac:relations/eac:cpfRelation', EAC_NS)
   relations.each do |rel|
@@ -223,3 +249,27 @@
     context.logger.info("Indexed creator: #{record_id.text}")
   end
 end
+
+
+
+
+# Pattern matching arcflow's creator file naming: creator_{entity_type}_{id}
+CREATOR_ID_PATTERN = /^creator_(#{ENTITY_TYPES.join('|')})_\d+$/
+
+# Helper to build and validate creator IDs
+def build_creator_id(entity_type, id_number)
+  creator_id = "creator_#{entity_type}_#{id_number}"
+  unless creator_id =~ CREATOR_ID_PATTERN
+    raise ArgumentError, "Invalid creator ID: #{creator_id} doesn't match pattern"
+  end
+  creator_id
+end
+
+# Helper to convert ArchivesSpace URI to Solr creator ID
+def aspace_uri_to_solr_id(uri)
+  return nil unless uri
+  # Match: /agents/{type}/{id} or https://.../agents/{type}/{id}
+  if uri =~ /agents\/(#{ENTITY_TYPES.join('|')})\/(\d+)/
+    build_creator_id($1, $2)
+  end
+end
\ No newline at end of file

From 3676246495462a13dd1ff64f8c89521b1ec0b8f6 Mon Sep 17 00:00:00 2001
From: Alex Dryden <adryden3@illinois.edu>
Date: Tue, 3 Mar 2026 13:16:39 -0500
Subject: [PATCH 07/10] fix: restrict target-agent criteria to a conservative
 list of known-relevant filters

This will require further refinement, but for now the query uses a more
conservative list of filters we know are relevant.
---
 arcflow/main.py | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/arcflow/main.py b/arcflow/main.py
index bf9375b..4613ca6 100644
--- a/arcflow/main.py
+++ b/arcflow/main.py
@@ -701,16 +701,11 @@ def _get_target_agent_criteria(self, modified_since=0):
         Defines the Solr query criteria for "target" agents.
         These are agents we want to process.
         """
-        # Basic filters for agents to include
         criteria = [
+            "linked_agent_roles:creator",
             "system_generated:false",
             "is_user:false",
-            "is_repo_agent:false",
-            # Include agents that are creators OR are linked to published records
-            "(linked_agent_roles:creator OR is_linked_to_published_record:true)",
-            # Exclude agents whose ONLY role is 'donor'
-            # This logic says: "NOT (role is only donor)"
-            "(*:* -linked_agent_roles:donor OR (*:* AND linked_agent_roles:[* TO *] AND (*:* -linked_agent_roles:donor)))"
+#            "is_repo_agent:false",
         ]
 
         # Add time filter if applicable
@@ -727,11 +722,10 @@ def _get_nontarget_agent_criteria(self, modified_since=0):
         """
         # The core logic for what makes an agent a "target"
         target_logic = " AND ".join([
+            "linked_agent_roles:creator",
             "system_generated:false",
             "is_user:false",
-            "is_repo_agent:false",
-            "(linked_agent_roles:creator OR is_linked_to_published_record:true)",
-            "(*:* -linked_agent_roles:donor OR (*:* AND linked_agent_roles:[* TO *] AND (*:* -linked_agent_roles:donor)))"
+#            "is_repo_agent:false",
         ])
 
         # We find non-targets by negating the entire block of target logic

From 29907fb43ce373e70b2d2cca49fd4fc8d8ae124e Mon Sep 17 00:00:00 2001
From: Alex Dryden <adryden3@illinois.edu>
Date: Tue, 3 Mar 2026 13:17:01 -0500
Subject: [PATCH 08/10] fix: pass indent_size instead of the indent string

---
 arcflow/main.py | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/arcflow/main.py b/arcflow/main.py
index 4613ca6..44f8c1b 100644
--- a/arcflow/main.py
+++ b/arcflow/main.py
@@ -541,7 +541,7 @@ def process_deleted_records(self):
                             ead_id.replace('.', '-'),  # dashes in Solr
                             f'{xml_dir}/{ead_id}.xml',  # dots in filenames
                             f'{pdf_dir}/{ead_id}.pdf',
-                            indent=4)
+                            indent_size=4)
                     else:
                         self.log.error(f'{" " * (indent_size+2)}Symlink {symlink_path} not found. 
Unable to delete the associated EAD from Arclight Solr.') @@ -1207,7 +1207,7 @@ def delete_arclight_solr_record(self, solr_record_id, indent_size=0): except requests.exceptions.RequestException as e: self.log.error(f'{indent}Error deleting Solr record {solr_record_id} from ArcLight Solr: {e}') - def delete_file(self, file_path, indent_side=0): + def delete_file(self, file_path, indent_size=0): indent = ' ' * indent_size try: @@ -1218,21 +1218,19 @@ def delete_file(self, file_path, indent_side=0): def delete_ead(self, resource_id, ead_id, xml_file_path, pdf_file_path, indent_size=0): - indent = ' ' * indent_size # delete from solr deleted_solr_record = self.delete_arclight_solr_record(ead_id, indent_size=indent_size) if deleted_solr_record: - self.delete_file(pdf_file_path, indent=indent) - self.delete_file(xml_file_path, indent=indent) + self.delete_file(pdf_file_path, indent_size=indent_size) + self.delete_file(xml_file_path, indent_size=indent_size) # delete symlink if exists symlink_path = f'{os.path.dirname(xml_file_path)}/{resource_id}.xml' - self.delete_file(symlink_path, indent=indent) + self.delete_file(symlink_path, indent_size=indent_size) def delete_creator(self, file_path, solr_id, indent_size=0): - indent = ' ' * indent_size deleted_solr_record = self.delete_arclight_solr_record(solr_id, indent_size=indent_size) if deleted_solr_record: - self.delete_file(file_path, indent=indent) + self.delete_file(file_path, indent_size=indent_size) From 595279839f945f66dd60de54e422fae46a5323f6 Mon Sep 17 00:00:00 2001 From: Alex Dryden <adryden3@illinois.edu> Date: Tue, 3 Mar 2026 13:39:52 -0500 Subject: [PATCH 09/10] Expand wildcards with glob and use list command sequence instead of string command with shell=True --- arcflow/main.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/arcflow/main.py b/arcflow/main.py index 44f8c1b..c6d8dd9 100644 --- a/arcflow/main.py +++ b/arcflow/main.py @@ -17,7 +17,7 @@ from asnake.client import ASnakeClient from multiprocessing.pool import ThreadPool as Pool from utils.stage_classifications import extract_labels - +import glob base_dir = os.path.abspath((__file__) + "/../../") log_file = os.path.join(base_dir, 'logs/arcflow.log') @@ -577,7 +577,7 @@ def index_collections(self, repo_id, xml_file_path, indent_size=0): return traject_config = f'{arclight_path}/lib/arclight/traject/ead2_config.rb' - + xml_files = glob.glob(xml_file_path) # Returns list of matching files cmd = [ 'bundle', 'exec', 'traject', '-u', self.solr_url, @@ -586,8 +586,8 @@ def index_collections(self, repo_id, xml_file_path, indent_size=0): '-s', f'solr_writer.batch_size={self.batch_size}', '-s', 'solr_writer.commit_on_close=true', '-i', 'xml', - '-c', traject_config - ] + '-c', traject_config, + ] + xml_files if self.traject_extra_config: if isinstance(self.traject_extra_config, (list, tuple)): @@ -595,14 +595,11 @@ def index_collections(self, repo_id, xml_file_path, indent_size=0): else: # Treat a string extra config as a path and pass it with -c cmd.extend(['-c', self.traject_extra_config]) - - cmd.append(xml_file_path) - + env = os.environ.copy() env['REPOSITORY_ID'] = str(repo_id) - cmd_string = ' '.join(cmd) result = subprocess.run( - cmd_string, + cmd, cwd=self.arclight_dir, env=env, stderr=subprocess.PIPE, From 51d2eea40f62446dcf33853519f946ec58145a49 Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Wed, 4 Mar 2026 15:40:36 -0500 Subject: [PATCH 10/10] feat(arclight#29): Refactor run orchestration for 
threaded and single-scope runs Restructured the pipeline for collections and creators to run independently with their own timestamps, proper cleanup, and parallel execution orchestrated via ThreadPoolExecutor Changes: - Split last_updated into last_updated_collections and last_updated_creators - Extract run_collections() and run_creators() from monolithic run() - Add run_all() that orchestrates both via ThreadPoolExecutor - Scope Solr cleanup to record type using is_creator flag - Update process_deleted_records() to accept scope parameter - Move update_repositories() into run_all() (only runs for full updates) - Fix timestamp comparisons to use min() where needed - Add directory creation safeguards (os.makedirs with exist_ok) - Change is_creator from string 'true' to boolean true - Add proper exception handling in parallel execution Benefits: - Collections and creators can be rebuilt independently (--collections-only, --agents-only) - Full runs execute both pipelines in parallel (faster) - Each record type maintains its own timestamp state - Solr cleanup is scoped to avoid deleting unrelated records --- README.md | 9 +- arcflow/main.py | 259 +++++++++++++++++++++--------- example_traject_config_eac_cpf.rb | 13 +- 3 files changed, 191 insertions(+), 90 deletions(-) diff --git a/README.md b/README.md index 2a42f18..d243ee8 100644 --- a/README.md +++ b/README.md @@ -61,9 +61,9 @@ This filtering ensures that only legitimate archival creators are discoverable i ### How Creator Records Work -1. **Extraction**: `get_all_agents()` fetches all agents from ArchivesSpace -2. **Filtering**: `is_target_agent()` filters out system users, donors, and non-creator agents -3. **Processing**: `task_agent()` generates an EAC-CPF XML document for each target agent with bioghist notes +1. **Extraction**: Agent data is exported from ArchivesSpace for use in creator records +2. **Filtering**: Creator vs. non-creator agents are determined via Solr queries built from `_get_target_agent_criteria()` and `_get_nontarget_agent_criteria()`, which exclude system users, donors, and other non-creator agents +3. **Processing**: For each target creator agent, ArcFlow generates an EAC-CPF XML document that includes bioghist notes 4. **Linking**: Handled via Solr using the persistent_id field (agents and collections linked through bioghist references) 5. 
**Indexing**: Creator XML files are indexed to Solr using `traject_config_eac_cpf.rb`
 
@@ -182,7 +182,8 @@ python -m arcflow.main --arclight-dir /path --aspace-dir /path --solr-url http:/
 Required arguments:
 - `--arclight-dir` - Path to ArcLight installation directory
 - `--aspace-dir` - Path to ArchivesSpace installation directory
-- `--solr-url` - URL of the Solr core (e.g., http://localhost:8983/solr/blacklight-core)
+- `--solr-url` - URL of the ArcLight Solr core (e.g., http://localhost:8983/solr/blacklight-core)
+- `--aspace-solr-url` - URL of the ASpace Solr core
 
 Optional arguments:
 - `--force-update` - Force update of all data (recreates everything from scratch)
diff --git a/arcflow/main.py b/arcflow/main.py
index c6d8dd9..9273791 100644
--- a/arcflow/main.py
+++ b/arcflow/main.py
@@ -67,10 +67,15 @@ def __init__(self, arclight_dir, aspace_dir, solr_url, aspace_solr_url, traject_
         self.start_time = int(time.time())
             try:
                 with open(self.arcflow_file_path, 'r') as file:
-                    config = yaml.safe_load(file)
+                    config = yaml.safe_load(file) or {}
                 try:
-                    self.last_updated = datetime.strptime(
-                        config['last_updated'], '%Y-%m-%dT%H:%M:%S%z')
+                    date_fmt = '%Y-%m-%dT%H:%M:%S%z'
+                    epoch = datetime.fromtimestamp(0, timezone.utc)
+                    legacy_ts = config.get('last_updated')
+                    collections_ts_str = config.get('last_updated_collections') or legacy_ts
+                    creators_ts_str = config.get('last_updated_creators') or legacy_ts
+                    self.last_updated_collections = datetime.strptime(collections_ts_str, date_fmt) if collections_ts_str else epoch
+                    self.last_updated_creators = datetime.strptime(creators_ts_str, date_fmt) if creators_ts_str else epoch
                 except Exception as e:
                     self.log.error(f'Error parsing last_updated date on file .arcflow.yml: {e}')
                     exit(0)
@@ -79,7 +84,8 @@ def __init__(self, arclight_dir, aspace_dir, solr_url, aspace_solr_url, traject_
                 self.log.error('File .arcflow.yml not found. Create the file and try again or run with --force-update to recreate EADs from scratch.')
                 exit(0)
         else:
-            self.last_updated = datetime.fromtimestamp(0, timezone.utc)
+            self.last_updated_collections = datetime.fromtimestamp(0, timezone.utc)
+            self.last_updated_creators = datetime.fromtimestamp(0, timezone.utc)
         try:
             with open(os.path.join(base_dir, '.archivessnake.yml'), 'r') as file:
                 config = yaml.safe_load(file)
@@ -135,13 +141,16 @@ def update_repositories(self):
         self.log.info('Checking for updates on repositories information...')
 
         update_repos = False
+        # Use the oldest of the two run timestamps so that a repo change
+        # is detected regardless of which pipeline last ran. 
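+        # (e.g. if collections last ran 2026-03-01T00:00:00+0000 but creators
+        # last ran 2026-02-20T00:00:00+0000, min() picks the creators
+        # timestamp, so a repo modified 2026-02-25 is still treated as updated)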
+ last_updated = min(self.last_updated_collections, self.last_updated_creators) for repo in repos: # python doesn't support Zulu timezone suffixes, # converting system_mtime and user_mtime to UTC offset notation - if (self.last_updated <= datetime.strptime( + if (last_updated <= datetime.strptime( repo['system_mtime'].replace('Z','+0000'), '%Y-%m-%dT%H:%M:%S%z') - or self.last_updated <= datetime.strptime( + or last_updated <= datetime.strptime( repo['user_mtime'].replace('Z','+0000'), '%Y-%m-%dT%H:%M:%S%z')): update_repos = True @@ -309,6 +318,7 @@ def task_resource(self, repo, resource_id, xml_dir, pdf_dir, indent_size=0): f'{pdf_dir}/{prev_ead_id}.pdf', indent_size=indent_size) + os.makedirs(xml_dir, exist_ok=True) self.save_file(xml_file_path, xml_content, 'XML', indent_size=indent_size) self.create_symlink( os.path.basename(xml_file_path), @@ -384,6 +394,7 @@ def task_pdf(self, repo_uri, job_id, ead_id, pdf_dir, indent_size=0): else: pdf_content = b'' # empty PDF file + os.makedirs(pdf_dir, exist_ok=True) self.save_file( f'{pdf_dir}/{ead_id}.pdf', pdf_content, @@ -397,7 +408,7 @@ def task_pdf(self, repo_uri, job_id, ead_id, pdf_dir, indent_size=0): time.sleep(5) - def update_eads(self): + def process_collections(self): """ Update EADs in ArcLight with the latest data from resources in ArchivesSpace. @@ -406,34 +417,10 @@ def update_eads(self): resource_dir = f'{xml_dir}/resources' pdf_dir = f'{self.arclight_dir}/public/pdf' - modified_since = int(self.last_updated.timestamp()) - + modified_since = int(self.last_updated_collections.timestamp()) + if self.force_update or modified_since <= 0: modified_since = 0 - # delete all EADs and related files in ArcLight Solr - try: - response = requests.post( - f'{self.solr_url}/update?commit=true', - json={'delete': {'query': '*:*'}}, - ) - if response.status_code == 200: - self.log.info('Deleted all EADs and Creators from ArcLight Solr.') - # delete related directories after suscessful - # deletion from solr - for dir_path, dir_name in [(xml_dir, 'XMLs'), (pdf_dir, 'PDFs')]: - try: - shutil.rmtree(dir_path) - self.log.info(f'Deleted {dir_name} directory {dir_path}.') - except Exception as e: - self.log.error(f'Error deleting {dir_name} directory "{dir_path}": {e}') - else: - self.log.error(f'Failed to delete all EADs from Arclight Solr. Status code: {response.status_code}') - except requests.exceptions.RequestException as e: - self.log.error(f'Error deleting all EADs and Creators from ArcLight Solr: {e}') - - # create directories if don't exist - for dir_path in (resource_dir, pdf_dir): - os.makedirs(dir_path, exist_ok=True) # process resources that have been modified in ArchivesSpace since last update self.log.info('Fetching resources from ArchivesSpace...') @@ -504,23 +491,36 @@ def update_eads(self): - def process_deleted_records(self): + def process_deleted_records(self, scope): + """ + Process records deleted in ArchivesSpace since the last run. + scope: 'collections', 'creators', or 'all' + Determines which record types are checked for deletion and which + timestamp is used as the lower bound for the delete-feed query. + """ xml_dir = f'{self.arclight_dir}/public/xml' resource_dir = f'{xml_dir}/resources' agent_dir = f'{xml_dir}/agents' pdf_dir = f'{self.arclight_dir}/public/pdf' - modified_since = int(self.last_updated.timestamp()) - # process records that have been deleted since last update in ArchivesSpace + # Use the earlier timestamp when both types are in scope so no + # deletions are missed. 
Per-type filtering happens in the loop below. + if scope == 'all': + modified_since = min(int(self.last_updated_collections.timestamp()), + int(self.last_updated_creators.timestamp())) + elif scope == 'collections': + modified_since = int(self.last_updated_collections.timestamp()) + else: # 'creators' + modified_since = int(self.last_updated_creators.timestamp()) + resource_pattern = r'^/repositories/(?P<repo_id>\d+)/resources/(?P<record_id>\d+)$' agent_pattern = r'^/agents/(?P<agent_type>people|corporate_entities|families)/(?P<record_id>\d+)$' - page = 1 while True: deleted_records = self.client.get( - f'/delete-feed', + '/delete-feed', params={ 'page': page, 'modified_since': modified_since, @@ -529,29 +529,29 @@ def process_deleted_records(self): for record in deleted_records['results']: resource_match = re.match(resource_pattern, record) agent_match = re.match(agent_pattern, record) - if resource_match and not self.agents_only: - resource_id = resource_match.group('resource_id') - self.log.info(f'{" " * indent_size}Processing deleted resource ID {resource_id}...') + if resource_match and scope in ('collections', 'all'): + resource_id = resource_match.group('record_id') + self.log.info(f'Processing deleted resource ID {resource_id}...') symlink_path = f'{resource_dir}/{resource_id}.xml' ead_id = self.get_ead_from_symlink(symlink_path) if ead_id: self.delete_ead( - resource_id, + resource_id, ead_id.replace('.', '-'), # dashes in Solr - f'{xml_dir}/{ead_id}.xml', # dots in filenames - f'{pdf_dir}/{ead_id}.pdf', + f'{resource_dir}/{ead_id}.xml', # dots in filenames + f'{pdf_dir}/{ead_id}.pdf', indent_size=4) else: - self.log.error(f'{" " * (indent_size+2)}Symlink {symlink_path} not found. Unable to delete the associated EAD from Arclight Solr.') + self.log.error(f'Symlink {symlink_path} not found. Unable to delete the associated EAD from ArcLight Solr.') - if agent_match and not self.collections_only: - agent_id = agent_match.group('agent_id') - self.log.info(f'{" " * indent_size}Processing deleted agent ID {agent_id}...') + if agent_match and scope in ('creators', 'all'): + agent_type = agent_match.group('agent_type') + agent_id = agent_match.group('record_id') + self.log.info(f'Processing deleted agent ID {agent_id}...') file_path = f'{agent_dir}/{agent_id}.xml' agent_solr_id = f'creator_{agent_type}_{agent_id}' - self.delete_creator(file_path, agent_solr_id, indent_size) - + self.delete_creator(file_path, agent_solr_id) if deleted_records['last_page'] == page: break @@ -879,6 +879,7 @@ def task_agent(self, agent_uri, agents_dir, repo_id=1, indent_size=0): # Save EAC-CPF XML to file filename = f'{agents_dir}/{creator_id}.xml' + os.makedirs(agents_dir, exist_ok=True) with open(filename, 'w', encoding='utf-8') as f: f.write(eac_cpf_xml) @@ -901,15 +902,12 @@ def process_creators(self): xml_dir = f'{self.arclight_dir}/public/xml' agents_dir = f'{xml_dir}/agents' - modified_since = int(self.last_updated.timestamp()) + modified_since = int(self.last_updated_creators.timestamp()) indent_size = 0 indent = ' ' * indent_size self.log.info(f'{indent}Processing creator agents...') - # Create agents directory if it doesn't exist - os.makedirs(agents_dir, exist_ok=True) - # Get agents to process agents = self.get_all_agents(modified_since=modified_since, indent_size=indent_size) @@ -1233,47 +1231,152 @@ def delete_creator(self, file_path, solr_id, indent_size=0): def save_config_file(self): """ - Save the last updated timestamp to the .arcflow.yml file. 
+ Save the last updated timestamps to the .arcflow.yml file. + Each record type (collections, creators) has its own timestamp so they + can be run independently without overwriting each other's state. """ try: + # Preserve timestamps for record types not processed in this run + try: + with open(self.arcflow_file_path, 'r') as file: + config = yaml.safe_load(file) or {} + except FileNotFoundError: + config = {} + config.pop('last_updated', None) # remove legacy single key if present + now = datetime.fromtimestamp(self.start_time, timezone.utc).strftime('%Y-%m-%dT%H:%M:%S%z') + if not self.agents_only: + config['last_updated_collections'] = now + if not self.collections_only: + config['last_updated_creators'] = now with open(self.arcflow_file_path, 'w') as file: - yaml.dump({ - 'last_updated': datetime.fromtimestamp(self.start_time, timezone.utc).strftime('%Y-%m-%dT%H:%M:%S%z') - }, file) + yaml.dump(config, file) self.log.info(f'Saved file .arcflow.yml.') except Exception as e: self.log.error(f'Error writing to file .arcflow.yml: {e}') + def run_collections(self): + """ + Teardown (if force_update or first run), set up directories, and + process collection EADs. + """ + xml_dir = f'{self.arclight_dir}/public/xml' + resource_dir = f'{xml_dir}/resources' + pdf_dir = f'{self.arclight_dir}/public/pdf' + + if self.force_update or int(self.last_updated_collections.timestamp()) <= 0: + # Delete only collection records from Solr so that creator records + # remain intact when collections are rebuilt independently. + # Standard query parser: '*:* AND NOT is_creator:true' matches all + # documents except those flagged as creators. + try: + response = requests.post( + f'{self.solr_url}/update?commit=true', + json={'delete': {'query': '*:* AND NOT is_creator:true'}}, + ) + if response.status_code == 200: + self.log.info('Deleted all collection records from ArcLight Solr.') + for dir_path, dir_name in [(resource_dir, 'XMLs'), (pdf_dir, 'PDFs')]: + try: + shutil.rmtree(dir_path) + self.log.info(f'Deleted {dir_name} directory {dir_path}.') + except Exception as e: + self.log.error(f'Error deleting {dir_name} directory "{dir_path}": {e}') + else: + self.log.error(f'Failed to delete collection records from ArcLight Solr. Status code: {response.status_code}') + except requests.exceptions.RequestException as e: + self.log.error(f'Error deleting collection records from ArcLight Solr: {e}') + + os.makedirs(resource_dir, exist_ok=True) + os.makedirs(pdf_dir, exist_ok=True) + self.process_collections() + + + def run_creators(self): + """ + Teardown (if force_update or first run), set up directories, and + process creator agents. + """ + xml_dir = f'{self.arclight_dir}/public/xml' + agents_dir = f'{xml_dir}/agents' + + if self.force_update or int(self.last_updated_creators.timestamp()) <= 0: + # Delete only creator records from Solr (collections are handled separately). + try: + response = requests.post( + f'{self.solr_url}/update?commit=true', + json={'delete': {'query': 'is_creator:true'}}, + ) + if response.status_code == 200: + self.log.info('Deleted all creator records from ArcLight Solr.') + try: + shutil.rmtree(agents_dir) + self.log.info(f'Deleted agents directory {agents_dir}.') + except Exception as e: + self.log.error(f'Error deleting agents directory "{agents_dir}": {e}') + else: + self.log.error(f'Failed to delete creator records from ArcLight Solr. 
Status code: {response.status_code}') + except requests.exceptions.RequestException as e: + self.log.error(f'Error deleting creator records from ArcLight Solr: {e}') + + os.makedirs(agents_dir, exist_ok=True) + self.process_creators() + + + def run_all(self): + """ + Run all record-type workflows. + Updates repository metadata, then runs all record-type workflows in parallel. + This is the default execution path. When new record-type workflows + are introduced, add them here. + """ + self.update_repositories() + workflows = [self.run_collections, self.run_creators] + with concurrent.futures.ThreadPoolExecutor(max_workers=len(workflows)) as executor: + self.log.info('Running collections and creators in parallel...') + futures = [executor.submit(w) for w in workflows] + concurrent.futures.wait(futures) + exceptions = [] + for future in futures: + exc = future.exception() + if exc is not None: + self.log.error(f'Workflow failed: {exc}') + exceptions.append(exc) + if exceptions: + # Raise the first exception to signal overall failure and prevent + # downstream deleted-record processing and config timestamp updates. + raise exceptions[0] def run(self): """ Run the ArcFlow process. """ self.log.info(f'ArcFlow process started (PID: {self.pid}).') - - # Update repositories (unless agents-only mode) - if not self.agents_only: - self.update_repositories() - - # Update collections/EADs (unless agents-only mode) - if not self.agents_only: - self.update_eads() - - # Update creator records (unless collections-only mode) - if not self.collections_only: - self.process_creators() - # processing deleted resources is not needed when - # force-update is set or modified_since is set to 0 - if self.force_update or int(self.last_updated.timestamp()) <= 0: + if self.collections_only: + scope = 'collections' + self.run_collections() + elif self.agents_only: + scope = 'creators' + self.run_creators() + else: + scope = 'all' + self.run_all() + + # Skip deleted record processing on force_update or if all active + # timestamps indicate a first run (nothing has been indexed yet). + active_timestamps = [] + if scope in ('collections', 'all'): + active_timestamps.append(int(self.last_updated_collections.timestamp())) + if scope in ('creators', 'all'): + active_timestamps.append(int(self.last_updated_creators.timestamp())) + if self.force_update or all(t <= 0 for t in active_timestamps): self.log.info('Skipping deleted record processing.') else: - self.process_deleted_records() + self.process_deleted_records(scope) self.save_config_file() self.log.info(f'ArcFlow process completed (PID: {self.pid}). 
Elapsed time: {time.strftime("%H:%M:%S", time.gmtime(int(time.time()) - self.start_time))}.') - def main(): @@ -1305,11 +1408,11 @@ def main(): parser.add_argument( '--agents-only', action='store_true', - help='Process only agent records, skip collections (for testing)',) + help='Process only agent records, skip collections',) parser.add_argument( '--collections-only', action='store_true', - help='Process only repositories and collections, skip creator processing',) + help='Process only collections, skip creator processing',) parser.add_argument( '--arcuit-dir', default=None, @@ -1317,7 +1420,7 @@ def main(): parser.add_argument( '--skip-creator-indexing', action='store_true', - help='Generate creator XML files but skip Solr indexing (for testing)',) + help='Generate creator XML files but skip Solr indexing',) args = parser.parse_args() # Validate mutually exclusive flags diff --git a/example_traject_config_eac_cpf.rb b/example_traject_config_eac_cpf.rb index 52d0050..f17dde8 100644 --- a/example_traject_config_eac_cpf.rb +++ b/example_traject_config_eac_cpf.rb @@ -22,12 +22,12 @@ # EAC-CPF namespace - used consistently throughout this config EAC_NS = { 'eac' => 'urn:isbn:1-931666-33-4' } -# Pattern matching arcflow's creator file naming: creator_{entity_type}_{id} -CREATOR_ID_PATTERN = /^creator_(corporate_entities|people|families)_\d+$/ - # Entity types - SINGLE SOURCE OF TRUTH ENTITY_TYPES = ['corporate_entities', 'people', 'families'] +# Pattern matching arcflow's creator file naming: creator_{entity_type}_{id} +CREATOR_ID_PATTERN = /^creator_(#{ENTITY_TYPES.join('|')})_\d+$/ + settings do provide "solr.url", ENV['SOLR_URL'] || "http://localhost:8983/solr/blacklight-core" provide "solr_writer.commit_on_close", "true" @@ -62,9 +62,9 @@ end end -# Add is_creator marker field +# Add is_creator boolean marker field to_field 'is_creator' do |record, accumulator| - accumulator << 'true' + accumulator << true end # # Record type @@ -253,9 +253,6 @@ -# Pattern matching arcflow's creator file naming: creator_{entity_type}_{id} -CREATOR_ID_PATTERN = /^creator_(#{ENTITY_TYPES.join('|')})_\d+$/ - # Helper to build and validate creator IDs def build_creator_id(entity_type, id_number) creator_id = "creator_#{entity_type}_#{id_number}"
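
Taken together, `_get_target_agent_criteria()` and `_get_nontarget_agent_criteria()` reduce to a pair of complementary Solr queries, executed through the count-then-fetch pattern in `_execute_solr_query()`. A minimal standalone sketch of that flow, assuming a stock Solr select handler; the core URL is illustrative, and the criteria mirror PATCH 07's conservative list:

```python
import requests

ASPACE_SOLR_URL = 'http://localhost:8983/solr/archivesspace'  # assumed core URL

# Mirrors _get_target_agent_criteria() after PATCH 07's conservative rewrite.
target_criteria = [
    "primary_type:(agent_person OR agent_corporate_entity OR agent_family)",
    "linked_agent_roles:creator",
    "system_generated:false",
    "is_user:false",
]
# Non-targets keep the type filter but negate the rest of the target logic.
nontarget_criteria = [
    target_criteria[0],
    f"NOT ({' AND '.join(target_criteria[1:])})",
]

def solr_ids(criteria):
    q = ' AND '.join(criteria)
    # First request: rows=0, only to learn how many documents match.
    count = requests.get(f'{ASPACE_SOLR_URL}/select',
                         params={'q': q, 'rows': 0, 'wt': 'json'}
                         ).json()['response']['numFound']
    if count == 0:
        return []
    # Second request: fetch exactly that many ids in one page.
    docs = requests.get(f'{ASPACE_SOLR_URL}/select',
                        params={'q': q, 'rows': count, 'fl': 'id', 'wt': 'json'}
                        ).json()['response']['docs']
    return [doc['id'] for doc in docs]

print(solr_ids(target_criteria))
```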
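The three `related_agent_*_ssim` fields from PATCH 06 are deliberately parallel: each iterates the same `cpfRelation` node set with the same `if href` guard and no deduplication, so index i always describes the same relation in all three arrays. A sketch of how a display layer might zip them back together, using a hypothetical document:

```python
# Hypothetical creator document shaped like the fields indexed above.
doc = {
    'related_agent_ids_ssim': ['creator_people_12', 'creator_corporate_entities_57'],
    'related_agent_uris_ssim': ['/agents/people/12', '/agents/corporate_entities/57'],
    'related_agent_relationship_types_ssim': ['associative', 'hierarchical'],
}

# Because all three fields are built from the same loop with the same guard,
# zip() reassembles the relations losslessly.
for solr_id, uri, rel_type in zip(doc['related_agent_ids_ssim'],
                                  doc['related_agent_uris_ssim'],
                                  doc['related_agent_relationship_types_ssim']):
    print(f'{rel_type}: {solr_id} ({uri})')
```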
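The Ruby helpers appended to the config encode arcflow's `creator_{entity_type}_{id}` naming convention. For reference, the same URI-to-ID mapping as a Python sketch; `ENTITY_TYPES` mirrors the Ruby constant and the sample URIs are illustrative:

```python
import re

ENTITY_TYPES = ('corporate_entities', 'people', 'families')
AGENT_URI_RE = re.compile(rf"agents/({'|'.join(ENTITY_TYPES)})/(\d+)")

def aspace_uri_to_solr_id(uri):
    """Map /agents/{type}/{id} (relative or absolute) to creator_{type}_{id}."""
    match = AGENT_URI_RE.search(uri or '')
    if match is None:
        return None  # e.g. software agents, or malformed hrefs
    entity_type, agent_id = match.groups()
    return f'creator_{entity_type}_{agent_id}'

assert aspace_uri_to_solr_id('/agents/people/42') == 'creator_people_42'
assert aspace_uri_to_solr_id(
    'https://archivesspace-stage.library.illinois.edu/agents/corporate_entities/57'
) == 'creator_corporate_entities_57'
assert aspace_uri_to_solr_id('/agents/software/1') is None
```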
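PATCH 09's switch from a shell command string to an argv list moves wildcard expansion from the shell into Python's `glob`, which keeps spaces, quotes, and `;` in paths from being reinterpreted as shell syntax. A minimal sketch with assumed paths:

```python
import glob
import subprocess

# shell=False + argv list: the shell never sees the command, so the `*`
# must be expanded in Python before the call.
xml_files = glob.glob('/srv/arclight/public/xml/resources/2_*_batch_1.xml')  # assumed layout
cmd = [
    'bundle', 'exec', 'traject',
    '-u', 'http://localhost:8983/solr/blacklight-core',
    '-i', 'xml',
    '-c', 'lib/arclight/traject/ead2_config.rb',
] + xml_files
subprocess.run(cmd, cwd='/srv/arclight', check=True)  # assumed ArcLight checkout
```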
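PATCH 10 scopes Solr teardown by record type through the boolean `is_creator` flag set at index time. Under that assumption, the two delete-by-query payloads are complementary (the core URL is illustrative):

```python
import requests

SOLR_URL = 'http://localhost:8983/solr/blacklight-core'  # assumed ArcLight core

# Creators carry is_creator:true, so each pipeline can tear down only its
# own records on a force-update:
requests.post(f'{SOLR_URL}/update?commit=true',
              json={'delete': {'query': 'is_creator:true'}})              # creators
requests.post(f'{SOLR_URL}/update?commit=true',
              json={'delete': {'query': '*:* AND NOT is_creator:true'}})  # collections
```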
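PATCH 10's `save_config_file()` keeps one watermark per record type and retires the legacy `last_updated` key, so `--collections-only` and `--agents-only` runs do not overwrite each other's state. A round-trip sketch of that handling, assuming the same `.arcflow.yml` layout:

```python
from datetime import datetime, timezone
import yaml

STATE_PATH = '.arcflow.yml'  # same state file used above

try:
    with open(STATE_PATH) as fh:
        config = yaml.safe_load(fh) or {}
except FileNotFoundError:
    config = {}

config.pop('last_updated', None)  # retire the legacy single-timestamp key
now = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S%z')
config['last_updated_collections'] = now
# last_updated_creators is left untouched here, so a --collections-only run
# does not clobber the creators pipeline's watermark.

with open(STATE_PATH, 'w') as fh:
    yaml.dump(config, fh)
```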