diff --git a/Campaign/views.py b/Campaign/views.py index ce7cd863..36c7f927 100644 --- a/Campaign/views.py +++ b/Campaign/views.py @@ -23,6 +23,8 @@ from EvalData.models import PairwiseAssessmentResult from EvalData.models import seconds_to_timedelta from EvalData.models import TASK_DEFINITIONS +from EvalData.models import TaskAgenda +from EvalData.models.direct_assessment_document import DirectAssessmentDocumentTask # pylint: disable=import-error @@ -51,19 +53,27 @@ def campaign_status(request, campaign_name, sort_key=2): _msg = 'Failure to identify campaign {0}'.format(campaign_name) return HttpResponse(_msg, content_type='text/plain') + try: + campaign_opts = campaign.campaignOptions.lower().split(";") + # may raise KeyError + result_type = RESULT_TYPE_BY_CLASS_NAME[campaign.get_campaign_type()] + except KeyError as exc: + LOGGER.debug( + f'Invalid campaign type {campaign.get_campaign_type()} for campaign {campaign.campaignName}' + ) + LOGGER.error(exc) + return HttpResponse( + 'Invalid campaign type for campaign {0}'.format(campaign.campaignName), + content_type='text/plain', + ) + + # special handling for ESA + if "esa" in campaign_opts: + return campaign_status_esa(campaign) + _out = [] for team in campaign.teams.all(): for user in team.members.all(): - try: - campaign_opts = campaign.campaignOptions.lower().split(";") - # may raise KeyError - result_type = RESULT_TYPE_BY_CLASS_NAME[campaign.get_campaign_type()] - except KeyError as exc: - LOGGER.debug( - f'Invalid campaign type {campaign.get_campaign_type()} for campaign {campaign.campaignName}' - ) - LOGGER.error(exc) - continue _data = result_type.objects.filter( createdBy=user, completed=True, task__campaign=campaign.id @@ -118,29 +128,6 @@ def campaign_status(request, campaign_name, sort_key=2): (x[0], x[1], -len(json.loads(x[2])), x[3], x[4], x[5], x[6]) for x in _data ] - elif "esa" in campaign_opts: - is_mqm_or_esa = True - _data = _data.values_list( - 'start_time', - 'end_time', - 'score', - 
def campaign_status_esa(campaign) -> HttpResponse:
    """Render an HTML progress table for an ESA campaign.

    Emits one table row per non-staff member of every campaign team:
    progress (completed items / items in the assigned task), first/last
    modification timestamps, the wall-clock span between them, and the
    "real" annotation time (per (document, target) spans summed).

    NOTE(review): the HTML tag literals in the original were lost to text
    extraction; the markup below is a by-intent reconstruction — confirm
    tags against the rendered page.

    :param campaign: Campaign instance to report on.
    :return: HttpResponse with ``content_type='text/html'``.
    """
    from collections import defaultdict  # local: only needed for ESA status

    def _find_document_task(user):
        """Return the user's DirectAssessmentDocumentTask from their agenda, or None."""
        agenda = TaskAgenda.objects.filter(user=user, campaign=campaign).first()
        if not agenda:
            return None
        # Prefer open tasks; fall back to completed ones.
        for serialized in agenda.serialized_open_tasks():
            candidate = serialized.get_object_instance()
            if isinstance(candidate, DirectAssessmentDocumentTask):
                return candidate
        for serialized in agenda._completed_tasks.all():
            candidate = serialized.get_object_instance()
            if isinstance(candidate, DirectAssessmentDocumentTask):
                return candidate
        return None

    def _fmt_timestamp(seconds):
        """Epoch seconds -> 'YYYY-MM-DD HH:MM' (sub-minute precision dropped)."""
        stamp = str(datetime(1970, 1, 1) + seconds_to_timedelta(seconds)).split('.')[0]
        return ":".join(stamp.split(":")[:-1])

    def _fmt_duration(seconds):
        """Seconds -> 'HHh MMm'."""
        return (
            f'{int(floor(seconds / 3600)):0>2d}h '
            f'{int(floor((seconds % 3600) / 60)):0>2d}m'
        )

    # Build the page in a list and join once: repeated `out_str +=` in a
    # loop is quadratic in CPython-unspecified ways on other interpreters.
    parts = [
        '<html><head><meta charset="utf-8" /></head><body>',
        f'<h1>{campaign.campaignName}</h1>',
        '<table>',
        '<tr>' + ''.join(
            f'<th>{x}</th>'
            for x in (
                'Username', 'Progress', 'First Modified', 'Last Modified',
                'Time (Last-First)', 'Time (Real)',
            )
        ) + '</tr>',
    ]

    for team in campaign.teams.all():
        for user in team.members.all():
            if user.is_staff:
                continue

            task = _find_document_task(user)
            # Materialize once: len()/min()/max()/loops below would
            # otherwise re-evaluate the queryset repeatedly.
            results = list(
                DirectAssessmentDocumentResult.objects.filter(
                    createdBy=user, completed=True, task__campaign=campaign.id
                )
            )

            row = ['<tr>']
            if not results:
                # No completed work yet: show 0 progress, or flag that
                # the user has no document task at all.
                if task:
                    total = task.items.count()
                    row.append(f'<td>{user.username} 💤</td>')
                    row.append(f'<td>0/{total} (0%)</td>')
                else:
                    row.append(f'<td>{user.username} 💤</td>')
                    row.append('<td>No task assigned</td>')
                row.extend(['<td></td>'] * 4)
                row.append('</tr>')
                parts.append(''.join(row))
                continue

            if not task:
                # Fall back to the task referenced by the first result.
                task = DirectAssessmentDocumentTask.objects.filter(
                    id=results[0].task_id
                ).first()
            if not task:
                # Results exist but their task is gone: mark and move on.
                row.append(f'<td>{user.username} ❌</td>')
                row.append('<td>Task not found</td>')
                row.extend(['<td></td>'] * 4)
                row.append('</tr>')
                parts.append(''.join(row))
                continue

            total = task.items.count()
            done = len(results)
            marker = '✅' if done == total else '🛠️'
            row.append(f'<td>{user.username} {marker}</td>')
            row.append(f'<td>{done}/{total} ({done / total:.0%})</td>')

            first_modified = min(r.start_time for r in results)
            last_modified = max(r.end_time for r in results)
            row.append(f'<td>{_fmt_timestamp(first_modified)}</td>')
            row.append(f'<td>{_fmt_timestamp(last_modified)}</td>')
            # Upper bound: everything between first and last touch.
            row.append(f'<td>{_fmt_duration(last_modified - first_modified)}</td>')

            # "Real" time: one (min start, max end) span per
            # (document, target) pair, summed over all pairs.
            spans = defaultdict(list)
            for r in results:
                spans[(r.item.documentID, r.item.targetID)].append(
                    (r.start_time, r.end_time)
                )
            real_time = sum(
                max(end for _, end in pair) - min(start for start, _ in pair)
                for pair in spans.values()
            )
            row.append(f'<td>{_fmt_duration(real_time)}</td>')
            row.append('</tr>')
            parts.append(''.join(row))

    parts.append('</table></body></html>')
    return HttpResponse('\n'.join(parts), content_type='text/html')