diff --git a/.gitignore b/.gitignore
index 0bcd497..0e46e2e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -217,3 +217,6 @@ __marimo__/
# Sphinx documentation
docs/_build/
+
+# macOS system files
+**/.DS_Store
diff --git a/pyproject.toml b/pyproject.toml
index 77bc520..957f524 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -30,6 +30,8 @@ keywords = [
dependencies = [
"XBlock",
"Django>=4.2",
+ "django-crum",
+ "openedx-filters",
]
[project.urls]
@@ -41,6 +43,7 @@ Documentation = "https://xblocks-extra.readthedocs.io"
dev = [
"build",
"ruff",
+ "edx-i18n-tools",
]
test = [
"pytest>=7.0",
@@ -54,6 +57,13 @@ docs = [
[project.entry-points."xblock.v1"]
audio = "audio:AudioXBlock"
+feedback = "feedback.feedback:FeedbackXBlock"
+
+[project.entry-points."xblock.test.v0"]
+feedbacktest = "feedback.feedbacktests:feedbacktests"
+
+[project.entry-points."lms.djangoapp"]
+feedback = "feedback.apps:FeedbackConfig"
# Packages live in src/ but are installed without the src prefix
# e.g., src/foo_xblock/ is installed as foo_xblock
diff --git a/src/feedback/.DS_Store b/src/feedback/.DS_Store
new file mode 100644
index 0000000..f9bb98f
Binary files /dev/null and b/src/feedback/.DS_Store differ
diff --git a/src/feedback/README.rst b/src/feedback/README.rst
new file mode 100644
index 0000000..c7eb5b0
--- /dev/null
+++ b/src/feedback/README.rst
@@ -0,0 +1,137 @@
+##############
+FeedbackXBlock
+##############
+| |License: AGPL v3| |Status| |Python CI| |Publish package to PyPi|
+
+.. |License: AGPL v3| image:: https://img.shields.io/badge/License-AGPL_v3-blue.svg
+ :target: https://www.gnu.org/licenses/agpl-3.0
+
+.. |Python CI| image:: https://github.com/openedx/FeedbackXBlock/actions/workflows/ci.yml/badge.svg
+ :target: https://github.com/openedx/FeedbackXBlock/actions/workflows/ci.yml
+
+.. |Publish package to PyPi| image:: https://github.com/openedx/FeedbackXBlock/actions/workflows/pypi-release.yml/badge.svg
+ :target: https://github.com/openedx/FeedbackXBlock/actions/workflows/pypi-release.yml
+
+.. |Status| image:: https://img.shields.io/badge/status-maintained-31c653
+
+Purpose
+=======
+
+`XBlock`_ is the Open edX component architecture for building custom
+learning interactives.
+
+.. _XBlock: https://openedx.org/r/xblock
+
+The FeedbackXBlock encourages learners to reflect on their learning experiences and allows instructors to capture feedback from learners. Feedback is provided as sentiment on a predefined scale and free text feedback. Feedback can be aggregated by instructors to understand which parts of a course work well and which parts work poorly.
+
+The block can be placed anywhere in the courseware, and students can
+provide feedback related to those sections. With just a few database queries,
+we can compile that feedback into useful insights. ;) We do provide
+aggregate statistics to instructors, but not yet the text of the
+feedback.
+
+.. |Good to bad scale| image:: happy_sad_example.png
+.. |Scale where good is in the middle| image:: happy_sad_happy_example.png
+.. |Numerical scale| image:: numerical_example.png
+
+The instructors can view reports in their course instructor dashboard. The report shows the count for every score, the average sentiment score, and the last 10 feedback comments.
+
+Tutor configuration
+-------------------
+
+To enable the FeedbackXBlock report in the instructor dashboard, you can use the following tutor inline plugins:
+
+.. code-block:: yaml
+
+ name: feedback-xblock-settings
+ version: 0.1.0
+ patches:
+ openedx-common-settings: |
+ FEATURES["ENABLE_FEEDBACK_INSTRUCTOR_VIEW"] = True
+ OPEN_EDX_FILTERS_CONFIG = {
+ "org.openedx.learning.instructor.dashboard.render.started.v1": {
+ "fail_silently": False,
+ "pipeline": [
+ "feedback.extensions.filters.AddFeedbackTab",
+ ]
+ },
+ }
+
+To enable this plugin you need to create a file called *feedback-xblock-settings.yml* in your tutor plugins directory of your tutor instance
+with the content of the previous code block, and run the following commands.
+
+.. code-block:: bash
+
+ tutor plugins enable feedback-xblock-settings
+ tutor config save
+
+
+You can find more information about tutor plugins in the Tutor `plugins`_ documentation.
+
+.. _plugins: https://docs.tutor.edly.io/tutorials/plugin.html
+
+Getting Started
+===============
+
+.. TODO Make it possible to run in the Workbench.
+
+For details regarding how to deploy this or any other XBlock in the lms instance, see the `installing-the-xblock`_ documentation.
+
+.. _installing-the-xblock: https://docs.tutor.edly.io/configuration.html#installing-extra-xblocks-and-requirements
+
+Getting Help
+============
+
+If you're having trouble, we have discussion forums at
+https://discuss.openedx.org where you can connect with others in the
+community.
+
+Our real-time conversations are on Slack. You can request a `Slack
+invitation`_, then join our `community Slack workspace`_.
+
+For anything non-trivial, the best path is to open an issue in this
+repository with as many details about the issue you are facing as you
+can provide.
+
+https://github.com/openedx/FeedbackXBlock/issues
+
+For more information about these options, see the `Getting Help`_ page.
+
+.. _Slack invitation: https://openedx.org/slack
+.. _community Slack workspace: https://openedx.slack.com/
+.. _Getting Help: https://openedx.org/getting-help
+
+How to Contribute
+=================
+
+Details about how to become a contributor to the Open edX project may
+be found in the wiki at `How to contribute`_
+
+.. _How to contribute: https://openedx.org/r/how-to-contribute
+
+The Open edX Code of Conduct
+----------------------------
+
+All community members should familiarize themselves with the `Open edX Code of Conduct`_.
+
+.. _Open edX Code of Conduct: https://openedx.org/code-of-conduct/
+
+People
+======
+
+The assigned maintainers for this component and other project details
+may be found in `Backstage`_ or grokked from inspecting catalog-info.yaml.
+
+.. _Backstage: https://open-edx-backstage.herokuapp.com/catalog/default/component/FeedbackXBlock
+
+Reporting Security Issues
+=========================
+
+Please do not report security issues in public. Please email security@openedx.org.
+
+History
+=======
+
+This is a basic clone of Dropthought for use in Open edX. This used to
+be called the RateXBlock. We renamed it for better consistency. We are
+keeping the old one around for backwards-compatibility.
diff --git a/src/feedback/__init__.py b/src/feedback/__init__.py
new file mode 100644
index 0000000..0a2388e
--- /dev/null
+++ b/src/feedback/__init__.py
@@ -0,0 +1,10 @@
+"""
+An edX XBlock designed to allow people to provide feedback on our
+course resources, and to think and synthesize about their experience
+in the course.
+"""
+
+import os
+from pathlib import Path
+
+ROOT_DIRECTORY = Path(os.path.dirname(os.path.abspath(__file__)))
diff --git a/src/feedback/apps.py b/src/feedback/apps.py
new file mode 100644
index 0000000..dfc9de6
--- /dev/null
+++ b/src/feedback/apps.py
@@ -0,0 +1,28 @@
+"""
+Feedback Django application initialization.
+"""
+
+from django.apps import AppConfig
+
+
+class FeedbackConfig(AppConfig):
+ """
+ Configuration for the feedback Django application.
+ """
+
+ name = "feedback"
+
+ plugin_app = {
+ "settings_config": {
+ "lms.djangoapp": {
+ "common": {"relative_path": "settings.common"},
+ "test": {"relative_path": "settings.test"},
+ "production": {"relative_path": "settings.production"},
+ },
+ "cms.djangoapp": {
+ "common": {"relative_path": "settings.common"},
+ "test": {"relative_path": "settings.test"},
+ "production": {"relative_path": "settings.production"},
+ },
+ },
+ }
diff --git a/src/feedback/conf/locale/config.yaml b/src/feedback/conf/locale/config.yaml
new file mode 100644
index 0000000..a968d94
--- /dev/null
+++ b/src/feedback/conf/locale/config.yaml
@@ -0,0 +1,4 @@
+# Configuration for i18n workflow.
+
+locales:
+ - en # English - Source Language
diff --git a/src/feedback/extensions/__init__.py b/src/feedback/extensions/__init__.py
new file mode 100644
index 0000000..23dfa8c
--- /dev/null
+++ b/src/feedback/extensions/__init__.py
@@ -0,0 +1,3 @@
+"""
+Open edX filters extensions module.
+"""
diff --git a/src/feedback/extensions/filters.py b/src/feedback/extensions/filters.py
new file mode 100644
index 0000000..b015e87
--- /dev/null
+++ b/src/feedback/extensions/filters.py
@@ -0,0 +1,184 @@
+"""
+Open edX Filters needed for instructor dashboard integration.
+"""
+
+import importlib.resources
+
+from crum import get_current_request
+from django.conf import settings
+from django.template import Context, Template
+from openedx_filters import PipelineStep
+from web_fragments.fragment import Fragment
+
+try:
+ from cms.djangoapps.contentstore.utils import get_lms_link_for_item
+ from lms.djangoapps.courseware.block_render import get_block_by_usage_id, load_single_xblock
+ from openedx.core.djangoapps.enrollments.data import get_user_enrollments
+ from xmodule.modulestore.django import modulestore
+except ImportError:
+ load_single_xblock = None
+ get_block_by_usage_id = None
+ modulestore = None
+ get_user_enrollments = None
+ get_lms_link_for_item = None
+
+TEMPLATE_ABSOLUTE_PATH = "/instructor_dashboard/"
+BLOCK_CATEGORY = "feedback"
+TEMPLATE_CATEGORY = "feedback_instructor"
+
+
+class AddFeedbackTab(PipelineStep):
+    """Add feedback tab to instructor dashboard by adding a new context with feedback data."""
+
+ def run_filter(self, context, template_name): # pylint: disable=unused-argument, arguments-differ
+ """Execute filter that modifies the instructor dashboard context.
+ Args:
+ context (dict): the context for the instructor dashboard.
+            template_name (str): instructor dashboard template name.
+ """
+ if not settings.FEATURES.get("ENABLE_FEEDBACK_INSTRUCTOR_VIEW", False):
+ return {
+ "context": context,
+ }
+
+ course = context["course"]
+ template = Template(self.resource_string(f"static/html/{TEMPLATE_CATEGORY}.html"))
+
+ request = get_current_request()
+
+ context.update(
+ {
+ "blocks": load_blocks(request, course),
+ }
+ )
+
+ html = template.render(Context(context))
+ frag = Fragment(html)
+ frag.add_css(self.resource_string(f"static/css/{TEMPLATE_CATEGORY}.css"))
+ frag.add_javascript(self.resource_string(f"static/js/src/{TEMPLATE_CATEGORY}.js"))
+
+ section_data = {
+ "fragment": frag,
+ "section_key": TEMPLATE_CATEGORY,
+ "section_display_name": "Course Feedback",
+ "course_id": str(course.id),
+ "template_path_prefix": TEMPLATE_ABSOLUTE_PATH,
+ }
+ context["sections"].append(section_data)
+
+ return {"context": context}
+
+ def resource_string(self, path):
+ """Handy helper for getting resources from our kit."""
+ return importlib.resources.files("feedback").joinpath(path).read_text(encoding="utf-8")
+
+
+def load_blocks(request, course):
+ """
+ Load feedback blocks for a given course for all enrolled students.
+
+ Arguments:
+ request (HttpRequest): Django request object.
+ course (CourseLocator): Course locator object.
+ """
+ course_id = str(course.id)
+
+ feedback_blocks = modulestore().get_items(course.id, qualifiers={"category": BLOCK_CATEGORY})
+
+ blocks = []
+
+ if not feedback_blocks:
+ return []
+
+ students = get_user_enrollments(course_id).values_list("user_id", "user__username")
+ for feedback_block in feedback_blocks:
+ block, _ = get_block_by_usage_id(
+ request,
+ str(course.id),
+ str(feedback_block.location),
+ disable_staff_debug_info=True,
+ course=course,
+ )
+ answers = load_xblock_answers(
+ request,
+ students,
+ str(course.location.course_key),
+ str(feedback_block.location),
+ course,
+ )
+
+ vote_aggregate = []
+ total_votes = 0
+ total_answers = 0
+
+ if not block.vote_aggregate:
+ block.vote_aggregate = [0] * len(block.get_prompt()["scale_text"])
+ for index, vote in enumerate(block.vote_aggregate):
+ vote_aggregate.append(
+ {
+ "scale_text": block.get_prompt()["scale_text"][index],
+ "count": vote,
+ }
+ )
+ total_answers += vote
+ # We have an inverted scale, so we need to invert the index
+ # to get the correct average rating.
+ # Excellent = 1, Very Good = 2, Good = 3, Fair = 4, Poor = 5
+ # So Excellent = 5, Very Good = 4, Good = 3, Fair = 2, Poor = 1
+ total_votes += vote * (5 - index)
+
+ try:
+ average_rating = round(total_votes / total_answers, 2)
+ except ZeroDivisionError:
+ average_rating = 0
+
+ unit = block.get_parent()
+ subsection = unit.get_parent()
+ section = subsection.get_parent()
+
+ blocks.append(
+ {
+ "display_name": block.display_name,
+ "prompts": block.prompts,
+ "vote_aggregate": vote_aggregate,
+ "answers": answers[-10:],
+ "unit_display_name": unit.display_name,
+ "subsection_display_name": subsection.display_name,
+ "section_display_name": section.display_name,
+ "average_rating": average_rating,
+ "url": get_lms_link_for_item(block.location),
+ }
+ )
+ return blocks
+
+
+def load_xblock_answers(request, students, course_id, block_id, course):
+ """
+ Load answers for a given feedback xblock instance.
+
+ Arguments:
+ request (HttpRequest): Django request object.
+ students (list): List of enrolled students.
+ course_id (str): Course ID.
+ block_id (str): Block ID.
+ course (CourseDescriptor): Course descriptor.
+ """
+ answers = []
+ for user_id, username in students:
+ student_xblock_instance = load_single_xblock(request, user_id, course_id, block_id, course)
+ if student_xblock_instance:
+ prompt = student_xblock_instance.get_prompt()
+ if student_xblock_instance.user_freeform:
+ if student_xblock_instance.user_vote != -1:
+ vote = prompt["scale_text"][student_xblock_instance.user_vote]
+ else:
+ vote = "No vote"
+ answers.append(
+ {
+ "username": username,
+ "user_vote": vote,
+ "user_freeform": student_xblock_instance.user_freeform,
+ }
+ )
+
+ return answers
diff --git a/src/feedback/feedback.py b/src/feedback/feedback.py
new file mode 100644
index 0000000..360def8
--- /dev/null
+++ b/src/feedback/feedback.py
@@ -0,0 +1,419 @@
+# pylint: disable=E1101
+"""
+This is an XBlock designed to allow people to provide feedback on our
+course resources, and to think and synthesize about their experience
+in the course.
+"""
+
+import html
+import importlib.resources
+import random
+
+import six
+from web_fragments.fragment import Fragment
+from xblock.core import XBlock
+from xblock.fields import Boolean, Float, Integer, List, Scope, String
+
+from feedback.utils import _
+
+try:
+ from xblock.utils.resources import ResourceLoader
+except ModuleNotFoundError: # For backward compatibility with releases older than Quince.
+ from xblockutils.resources import ResourceLoader
+
+resource_loader = ResourceLoader(__name__)
+
+# We provide default text which is designed to elicit student thought. We'd
+# like instructors to customize this to something highly structured (not
+# "What did you think?" and "How did you like it?".
+DEFAULT_FREEFORM = _("What did you learn from this? What was missing?")
+DEFAULT_LIKERT = _("How would you rate this as a learning experience?")
+DEFAULT_DEFAULT = _(
+ "Think about the material, and try to synthesize key lessons learned, as well as key gaps in our presentation."
+)
+DEFAULT_PLACEHOLDER = _(
+ "Take a little bit of time to reflect here. "
+ "Research shows that a meaningful synthesis will help "
+ "you better understand and remember material from "
+ "this course."
+)
+DEFAULT_ICON = "face"
+DEFAULT_SCALETEXT = [_("Excellent"), _("Good"), _("Average"), _("Fair"), _("Poor")]
+
+# Unicode alt faces are cute, but we do nulls instead for a11y.
+ICON_SETS = {
+ "face": [""] * 5, # u"😁😊😐😞😭",
+ "num": "12345",
+ "midface": [""] * 5, # u"😞😐😊😐😞"
+ "star": [""] * 5, # u "☆☆☆☆☆"
+}
+
+
+@XBlock.needs("i18n")
+class FeedbackXBlock(XBlock):
+ """
+ This is an XBlock -- eventually, hopefully an aside -- which
+ allows you to feedback content in the course. We've wanted this for a
+    long time, but Dartmouth finally encouraged me to start to build
+ this.
+ """
+
+ # This is a list of prompts. If we have multiple elements in the
+ # list, one will be chosen at random. This is currently not
+ # exposed in the UX. If the prompt is missing any portions, we
+ # will default to the ones in default_prompt.
+ prompts = List(
+ default=[
+ {
+ "freeform": DEFAULT_FREEFORM,
+ "default_text": DEFAULT_DEFAULT,
+ "likert": DEFAULT_LIKERT,
+ "placeholder": DEFAULT_PLACEHOLDER,
+ "scale_text": DEFAULT_SCALETEXT,
+ "icon_set": DEFAULT_ICON,
+ }
+ ],
+ scope=Scope.settings,
+ help=_("Freeform user prompt"),
+ xml_node=True,
+ )
+
+ prompt_choice = Integer(
+        default=-1, scope=Scope.user_state, help=_("Randomly selected index of the prompt shown to the user. -1 if uninitialized")
+ )
+
+ user_vote = Integer(default=-1, scope=Scope.user_state, help=_("How user voted. -1 if didn't vote"))
+
+ # pylint: disable=invalid-name
+ p = Float(default=100, scope=Scope.settings, help=_("What percent of the time should this show?"))
+
+ p_user = Float(default=-1, scope=Scope.user_state, help=_("Random number generated for p. -1 if uninitialized"))
+
+ vote_aggregate = List(default=None, scope=Scope.user_state_summary, help=_("A list of user votes"))
+
+ user_freeform = String(default="", scope=Scope.user_state, help=_("Feedback"))
+
+    display_name = String(display_name=_("Display Name"), default=_("Provide Feedback"), scope=Scope.settings)
+
+ voting_message = String(display_name=_("Voting message"), default=_("Thank you for voting!"), scope=Scope.settings)
+
+ feedback_message = String(
+ display_name=_("Feedback message"), default=_("Thank you for your feedback!"), scope=Scope.settings
+ )
+
+ show_aggregate_to_students = Boolean(
+ display_name=_("Show aggregate to students"), default=False, scope=Scope.settings
+ )
+
+ @classmethod
+ def resource_string(cls, path):
+ """Handy helper for getting resources from our kit."""
+ return importlib.resources.files(__package__).joinpath(path).read_text(encoding="utf-8")
+
+ def get_prompt(self, index=-1):
+ """
+ Return the current prompt dictionary, doing appropriate
+ randomization if necessary, and falling back to defaults when
+ necessary.
+ """
+ if index == -1:
+ index = self.prompt_choice
+
+ _ = self.runtime.service(self, "i18n").ugettext
+ # This is the default prompt if something is not specified in the
+ # settings dictionary. Note that this is not the same as the default
+ # above. The default above is the prompt the instructor starts from
+ # in a tool like Studio. This is a fallback in case some JSON fields
+ # are left unpopulated (e.g. if someone manually tweaks the database,
+ # in case of OLX authoring, and similar). The examplar above is
+ # intended as a well-structured, coherent response. This is designed
+ # as generic, to work with any content as a safe fallback.
+ prompt = {
+ "freeform": _("Please reflect on this course material"),
+ "default_text": _("Please take time to meaningfully reflect on your experience with this course material."),
+ "likert": _("Please rate your overall experience"),
+ "scale_text": [_("Excellent"), _("Good"), _("Average"), _("Fair"), _("Poor")],
+ "icon_set": "num",
+ "placeholder": _("Please take a moment to thoughtfully reflect."),
+ }
+
+ prompt.update(self.prompts[index])
+ return prompt
+
+ def student_view(self, context=None): # pylint: disable=unused-argument
+ """
+ The primary view of the FeedbackXBlock, shown to students
+ when viewing courses.
+ """
+ # Figure out which prompt we show. We set self.prompt_choice to
+ # the index of the prompt. We set it if it is out of range (either
+        # uninitialized, or incorrect due to changing list length). Then,
+ # we grab the prompt, prepopulated with defaults.
+ if self.prompt_choice < 0 or self.prompt_choice >= len(self.prompts):
+ self.prompt_choice = random.randint(0, len(self.prompts) - 1)
+ prompt = self.get_prompt()
+
+ # Staff see vote totals, so we have slightly different HTML here.
+ item_templates_file = "templates/html/scale_item.html"
+
+ # We have five Likert fields right now, but we'd like this to
+ # be dynamic
+ indexes = range(5)
+
+ # If the user voted before, we'd like to show that
+ active_vote = ["checked" if i == self.user_vote else "" for i in indexes]
+
+ # Confirm that we do have vote totals (this may be uninitialized
+ # otherwise). This should probably go into __init__ or similar.
+ self.init_vote_aggregate()
+ votes = self.vote_aggregate
+
+ # We grab the icons. This should move to a Filesystem field so
+ # instructors can upload new ones
+ def get_url(icon_type, i):
+ """
+ Helper function to generate the URL for the icons shown in the
+ tool. Takes the type of icon (active, inactive, etc.) and
+ the number of the icon.
+
+ Note that some icon types may not be actively used in the
+ styling. For example, at the time of this writing, we do
+ selected through CSS, rather than by using those icons.
+ """
+ templates = {
+ "inactive": "public/default_icons/i{set}{i}.png",
+ "active": "public/default_icons/a{set}{i}.png",
+ }
+ template = templates[icon_type]
+ icon_file = template.format(i=i, set=prompt["icon_set"])
+ return self.runtime.local_resource_url(self, icon_file)
+
+ ina_urls = [get_url("inactive", i) for i in range(1, 6)]
+ act_urls = [get_url("active", i) for i in range(1, 6)]
+
+ # Prepare the Likert scale fragment to be embedded into the feedback form
+ scale = "".join(
+ resource_loader.render_django_template(
+ item_templates_file,
+ {
+ "scale_text": scale_text,
+ "unicode_icon": unicode_icon,
+ "idx": idx,
+ "active": active,
+ "vote_cnt": vote_cnt,
+ "ina_icon": ina_icon,
+ "act_icon": act_icon,
+ "is_display_vote_cnt": self.vote_aggregate and (self.show_aggregate_to_students or self.is_staff()),
+ },
+ i18n_service=self.runtime.service(self, "i18n"),
+ )
+ for (
+ scale_text,
+ unicode_icon,
+ idx,
+ active,
+ vote_cnt,
+ act_icon,
+ ina_icon,
+ ) in zip(
+ prompt["scale_text"],
+ ICON_SETS[(prompt["icon_set"])],
+ indexes,
+ active_vote,
+ votes,
+ act_urls,
+ ina_urls,
+ strict=False,
+ )
+ )
+ if self.user_vote != -1:
+ _ = self.runtime.service(self, "i18n").ugettext
+ response = self.voting_message
+ else:
+ response = ""
+
+ # We initialize self.p_user if not initialized -- this sets whether
+ # or not we show it. From there, if it is less than odds of showing,
+ # we set the fragment to the rendered XBlock. Otherwise, we return
+ # empty HTML. There ought to be a way to return None, but XBlocks
+ # doesn't support that.
+ if self.p_user == -1:
+ self.p_user = random.uniform(0, 100)
+ if self.p_user < self.p:
+ frag = Fragment()
+ frag.add_content(
+ resource_loader.render_django_template(
+ "templates/html/feedback.html",
+ context={
+ "self": self,
+ "scale": scale,
+ "freeform_prompt": prompt["freeform"],
+ "likert_prompt": prompt["likert"],
+ "response": response,
+ "placeholder": prompt["placeholder"],
+ },
+ i18n_service=self.runtime.service(self, "i18n"),
+ )
+ )
+ else:
+ frag = Fragment("")
+
+ # Finally, we do the standard JS+CSS boilerplate. Honestly, XBlocks
+ # ought to have a sane default here.
+ frag.add_css(self.resource_string("static/css/feedback.css"))
+ frag.add_javascript(self.resource_string("static/js/src/feedback.js"))
+ frag.initialize_js("FeedbackXBlock")
+ return frag
+
+ def studio_view(self, context): # pylint: disable=unused-argument
+ """
+ Create a fragment used to display the edit view in the Studio.
+ """
+ prompt = self.get_prompt(0)
+ for idx in range(len(prompt["scale_text"])):
+ prompt[f"likert{idx}"] = prompt["scale_text"][idx]
+ frag = Fragment()
+
+ prompt.update(
+ {
+ "display_name": self.display_name,
+ "voting_message": self.voting_message,
+ "feedback_message": self.feedback_message,
+ "show_aggregate_to_students": self.show_aggregate_to_students,
+ }
+ )
+ frag.add_content(
+ resource_loader.render_django_template(
+ "templates/html/studio_view.html", prompt, i18n_service=self.runtime.service(self, "i18n")
+ )
+ )
+ js_str = self.resource_string("static/js/src/studio.js")
+ frag.add_javascript(six.text_type(js_str))
+ frag.initialize_js("FeedbackBlock", {"icon_set": prompt["icon_set"]})
+ return frag
+
+ @XBlock.json_handler
+ def studio_submit(self, data, suffix=""): # pylint: disable=unused-argument
+ """
+ Called when submitting the form in Studio.
+ """
+ for item in ["freeform", "likert", "placeholder", "icon_set"]:
+ item_submission = data.get(item, None)
+ if item_submission and len(item_submission) > 0:
+ self.prompts[0][item] = html.escape(item_submission)
+ for i in range(5):
+ likert = data.get(f"likert{i}", None)
+ if likert and len(likert) > 0:
+ self.prompts[0]["scale_text"][i] = html.escape(likert)
+
+ self.display_name = data.get("display_name")
+ self.voting_message = data.get("voting_message")
+ self.feedback_message = data.get("feedback_message")
+ self.show_aggregate_to_students = data.get("show_aggregate_to_students")
+
+ return {"result": "success"}
+
+ def init_vote_aggregate(self):
+ """
+ There are a lot of places we read the aggregate vote counts. We
+ start out with these uninitialized. This guarantees they are
+ initialized. We'd prefer to do it this way, rather than default
+ value, since we do plan to not force scale length to be 5 in the
+ future.
+ """
+ if not self.vote_aggregate:
+ self.vote_aggregate = [0] * (len(self.get_prompt()["scale_text"]))
+
+ def vote(self, data):
+ """
+ Handle voting
+ """
+ # prompt_choice is initialized by student view.
+ # Ideally, we'd break this out into a function.
+ _prompt = self.get_prompt(self.prompt_choice) # pylint: disable=unused-variable
+ # Make sure we're initialized
+ self.init_vote_aggregate()
+
+ # Remove old vote if we voted before
+ if self.user_vote != -1:
+ self.vote_aggregate[self.user_vote] -= 1
+
+ self.user_vote = data["vote"]
+ self.vote_aggregate[self.user_vote] += 1
+
+ @XBlock.json_handler
+ def feedback(self, data, suffix=""): # pylint: disable=unused-argument
+ """
+ Allow students to submit feedback, both numerical and
+ qualitative. We only update the specific type of feedback
+ submitted.
+
+ We return the current state. While this is not used by the
+ client code, it is helpful for testing. For staff users, we
+ also return the aggregate results.
+ """
+ _ = self.runtime.service(self, "i18n").ugettext
+
+ if "freeform" not in data and "vote" not in data:
+ response = {"success": False, "response": _("Please vote!")}
+ self.runtime.publish(self, "edx.feedbackxblock.nothing_provided", {})
+ if "vote" in data:
+ response = {"success": True, "response": self.voting_message}
+ self.runtime.publish(
+ self, "edx.feedbackxblock.likert_provided", {"old_vote": self.user_vote, "new_vote": data["vote"]}
+ )
+ self.vote(data)
+ if "freeform" in data:
+ response = {"success": True, "response": self.feedback_message}
+ self.runtime.publish(
+ self,
+ "edx.feedbackxblock.freeform_provided",
+ {"old_freeform": self.user_freeform, "new_freeform": data["freeform"]},
+ )
+ self.user_freeform = data["freeform"]
+
+ response.update(
+ { # pylint: disable=possibly-used-before-assignment
+ "freeform": self.user_freeform,
+ "vote": self.user_vote,
+ }
+ )
+
+ if self.show_aggregate_to_students or self.is_staff():
+ response["aggregate"] = self.vote_aggregate
+
+ return response
+
+ @staticmethod
+ def workbench_scenarios():
+ """
+ A canned scenario for display in the workbench.
+
+ We have three blocks. One shows up all the time (for testing). The
+ other two show up 50% of the time.
+ """
+ return [
+ (
+ "FeedbackXBlock",
+ """