diff --git a/.github/workflows/add-depr-ticket-to-depr-board.yml b/.github/workflows/add-depr-ticket-to-depr-board.yml deleted file mode 100644 index 250e394abc11..000000000000 --- a/.github/workflows/add-depr-ticket-to-depr-board.yml +++ /dev/null @@ -1,19 +0,0 @@ -# Run the workflow that adds new tickets that are either: -# - labelled "DEPR" -# - title starts with "[DEPR]" -# - body starts with "Proposal Date" (this is the first template field) -# to the org-wide DEPR project board - -name: Add newly created DEPR issues to the DEPR project board - -on: - issues: - types: [opened] - -jobs: - routeissue: - uses: openedx/.github/.github/workflows/add-depr-ticket-to-depr-board.yml@master - secrets: - GITHUB_APP_ID: ${{ secrets.GRAPHQL_AUTH_APP_ID }} - GITHUB_APP_PRIVATE_KEY: ${{ secrets.GRAPHQL_AUTH_APP_PEM }} - SLACK_BOT_TOKEN: ${{ secrets.SLACK_ISSUE_BOT_TOKEN }} diff --git a/.github/workflows/add-remove-label-on-comment.yml b/.github/workflows/add-remove-label-on-comment.yml deleted file mode 100644 index 0f369db7d293..000000000000 --- a/.github/workflows/add-remove-label-on-comment.yml +++ /dev/null @@ -1,20 +0,0 @@ -# This workflow runs when a comment is made on the ticket -# If the comment starts with "label: " it tries to apply -# the label indicated in rest of comment. -# If the comment starts with "remove label: ", it tries -# to remove the indicated label. -# Note: Labels are allowed to have spaces and this script does -# not parse spaces (as often a space is legitimate), so the command -# "label: really long lots of words label" will apply the -# label "really long lots of words label" - -name: Allows for the adding and removing of labels via comment - -on: - issue_comment: - types: [created] - -jobs: - add_remove_labels: - uses: openedx/.github/.github/workflows/add-remove-label-on-comment.yml@master - diff --git a/.github/workflows/js-tests.yml b/.github/workflows/js-tests.yml index 94a1368e96a5..972435a820e3 100644 --- a/.github/workflows/js-tests.yml +++ b/.github/workflows/js-tests.yml @@ -4,7 +4,7 @@ on: pull_request: push: branches: - - master + - release-ulmo jobs: run_tests: diff --git a/.github/workflows/lint-imports.yml b/.github/workflows/lint-imports.yml index baf914298be2..17cc4ea0a935 100644 --- a/.github/workflows/lint-imports.yml +++ b/.github/workflows/lint-imports.yml @@ -4,7 +4,7 @@ on: pull_request: push: branches: - - master + - release-ulmo jobs: lint-imports: diff --git a/.github/workflows/lockfileversion-check.yml b/.github/workflows/lockfileversion-check.yml index 736f1f98de13..ed0b7f97de6d 100644 --- a/.github/workflows/lockfileversion-check.yml +++ b/.github/workflows/lockfileversion-check.yml @@ -5,7 +5,7 @@ name: Lockfile Version check on: push: branches: - - master + - release-ulmo pull_request: jobs: diff --git a/.github/workflows/migrations-check.yml b/.github/workflows/migrations-check.yml index cd4d09589c12..686d8d9086b0 100644 --- a/.github/workflows/migrations-check.yml +++ b/.github/workflows/migrations-check.yml @@ -5,7 +5,7 @@ on: pull_request: push: branches: - - master + - release-ulmo jobs: check_migrations: diff --git a/.github/workflows/quality-checks.yml b/.github/workflows/quality-checks.yml index 3f4cbeeb4df9..ee67e3903569 100644 --- a/.github/workflows/quality-checks.yml +++ b/.github/workflows/quality-checks.yml @@ -4,8 +4,7 @@ on: pull_request: push: branches: - - master - - open-release/lilac.master + - release-ulmo jobs: run_tests: diff --git a/.github/workflows/semgrep.yml b/.github/workflows/semgrep.yml index 
520cd23a678b..d9d32ab9d36d 100644 --- a/.github/workflows/semgrep.yml +++ b/.github/workflows/semgrep.yml @@ -9,7 +9,7 @@ on: pull_request: push: branches: - - master + - release-ulmo jobs: run_semgrep: diff --git a/.github/workflows/shellcheck.yml b/.github/workflows/shellcheck.yml index 2e5b04bcc2ff..a8df63a20d57 100644 --- a/.github/workflows/shellcheck.yml +++ b/.github/workflows/shellcheck.yml @@ -9,7 +9,7 @@ on: pull_request: push: branches: - - master + - release-ulmo permissions: contents: read diff --git a/.github/workflows/static-assets-check.yml b/.github/workflows/static-assets-check.yml index 43cb597c16d7..3a0afa76deb7 100644 --- a/.github/workflows/static-assets-check.yml +++ b/.github/workflows/static-assets-check.yml @@ -4,7 +4,7 @@ on: pull_request: push: branches: - - master + - release-ulmo jobs: static_assets_check: diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index d8b8c26cd049..40f5dc9e71d7 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -4,7 +4,7 @@ on: pull_request: push: branches: - - master + - release-ulmo concurrency: # We only need to be running tests for the latest commit on each PR @@ -124,26 +124,26 @@ jobs: shell: bash run: | cd test_root/log - mv pytest_warnings.json pytest_warnings_${{ matrix.shard_name }}.json + mv pytest_warnings.json pytest_warnings_${{ matrix.shard_name }}_${{ matrix.python-version }}_${{ matrix.django-version }}_${{ matrix.mongo-version }}_${{ matrix.os-version }}.json - name: save pytest warnings json file if: success() uses: actions/upload-artifact@v4 with: - name: pytest-warnings-json-${{ matrix.shard_name }} + name: pytest-warnings-json-${{ matrix.shard_name }}-${{ matrix.python-version }}-${{ matrix.django-version }}-${{ matrix.mongo-version }}-${{ matrix.os-version }} path: | test_root/log/pytest_warnings*.json overwrite: true - name: Renaming coverage data file run: | - mv reports/.coverage reports/${{ matrix.shard_name }}.coverage + mv reports/.coverage reports/${{ matrix.shard_name }}_${{ matrix.python-version }}_${{ matrix.django-version }}_${{ matrix.mongo-version }}_${{ matrix.os-version }}.coverage - name: Upload coverage uses: actions/upload-artifact@v4 with: - name: coverage-${{ matrix.shard_name }} - path: reports/${{ matrix.shard_name }}.coverage + name: coverage-${{ matrix.shard_name }}-${{ matrix.python-version }}-${{ matrix.django-version }}-${{ matrix.mongo-version }}-${{ matrix.os-version }} + path: reports/${{ matrix.shard_name }}_${{ matrix.python-version }}_${{ matrix.django-version }}_${{ matrix.mongo-version }}_${{ matrix.os-version }}.coverage overwrite: true collect-and-verify: diff --git a/.github/workflows/units-test-scripts-structures-pruning.yml b/.github/workflows/units-test-scripts-structures-pruning.yml index 14a01b592308..ef408cfe66ec 100644 --- a/.github/workflows/units-test-scripts-structures-pruning.yml +++ b/.github/workflows/units-test-scripts-structures-pruning.yml @@ -4,7 +4,7 @@ on: pull_request: push: branches: - - master + - release-ulmo jobs: test: diff --git a/.github/workflows/units-test-scripts-user-retirement.yml b/.github/workflows/units-test-scripts-user-retirement.yml index 889c43a64a48..b43bbf46b0d4 100644 --- a/.github/workflows/units-test-scripts-user-retirement.yml +++ b/.github/workflows/units-test-scripts-user-retirement.yml @@ -4,7 +4,7 @@ on: pull_request: push: branches: - - master + - release-ulmo jobs: test: diff --git a/.github/workflows/update-geolite-database.yml 
b/.github/workflows/update-geolite-database.yml index 484fa167a371..d9ad767ce57e 100644 --- a/.github/workflows/update-geolite-database.yml +++ b/.github/workflows/update-geolite-database.yml @@ -8,7 +8,7 @@ on: branch: description: "Target branch against which to create PR" required: false - default: "master" + default: "release-ulmo" env: MAXMIND_URL: "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key=${{ secrets.MAXMIND_LICENSE_KEY }}&suffix=tar.gz" @@ -69,7 +69,7 @@ jobs: - name: Create a branch, commit the code and make a PR id: create-pr run: | - BRANCH="${{ github.actor }}/geoip2-bot-update-country-database-$(echo "${{ github.sha }}" | cut -c 1-7)" + BRANCH="${{ github.actor }}/geoip2-bot-update-country-database-${{ github.run_id }}" git checkout -b $BRANCH git add . git status @@ -79,13 +79,13 @@ jobs: --title "Update GeoLite Database" \ --body "PR generated by workflow `${{ github.workflow }}` on behalf of @${{ github.actor }}." \ --head $BRANCH \ - --base 'master' \ - --reviewer 'feanil' \ + --base 'release-ulmo' \ + --reviewer 'edx/orbi-bom' \ | grep -o 'https://github.com/.*/pull/[0-9]*') echo "PR Created: ${PR_URL}" echo "pull-request-url=$PR_URL" >> $GITHUB_OUTPUT env: - GH_TOKEN: ${{ github.token }} + GH_TOKEN: ${{ secrets.GH_PAT_WITH_ORG }} - name: Job summary run: | diff --git a/.github/workflows/upgrade-one-python-dependency.yml b/.github/workflows/upgrade-one-python-dependency.yml index 3f9678593c25..1d8170961865 100644 --- a/.github/workflows/upgrade-one-python-dependency.yml +++ b/.github/workflows/upgrade-one-python-dependency.yml @@ -6,7 +6,7 @@ on: branch: description: "Target branch to create requirements PR against" required: true - default: "master" + default: "release-ulmo" type: string package: description: "Name of package to upgrade" diff --git a/.github/workflows/upgrade-python-requirements.yml b/.github/workflows/upgrade-python-requirements.yml index cbb70b06b79d..90c6be00744c 100644 --- a/.github/workflows/upgrade-python-requirements.yml +++ b/.github/workflows/upgrade-python-requirements.yml @@ -8,7 +8,7 @@ on: branch: description: "Target branch to create requirements PR against" required: true - default: "master" + default: "release-ulmo" jobs: call-upgrade-python-requirements-workflow: # Don't run the weekly upgrade job on forks -- it will send a weekly failure email. diff --git a/.github/workflows/verify-dunder-init.yml b/.github/workflows/verify-dunder-init.yml index c3248def2f33..462f0e06af0b 100644 --- a/.github/workflows/verify-dunder-init.yml +++ b/.github/workflows/verify-dunder-init.yml @@ -3,7 +3,7 @@ name: Verify Dunder __init__.py Files on: pull_request: branches: - - master + - release-ulmo jobs: verify_dunder_init: diff --git a/cms/djangoapps/contentstore/exams.py b/cms/djangoapps/contentstore/exams.py index 6b25147c6abc..8a4ddc09425e 100644 --- a/cms/djangoapps/contentstore/exams.py +++ b/cms/djangoapps/contentstore/exams.py @@ -74,13 +74,13 @@ def register_exams(course_key): # Exams in courses not using an LTI based proctoring provider should use the original definition of due_date # from contentstore/proctoring.py. These exams are powered by the edx-proctoring plugin and not the edx-exams # microservice. 
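For readers skimming the exams.py hunk that follows: the refactor consolidates the due-date selection into a single source variable keyed on course pacing and proctoring provider. A minimal standalone sketch of that selection, using plain values in place of the course and subsection objects (the helper name and sample dates are illustrative only, not part of the change):

```python
from datetime import datetime, timezone

def resolve_exam_due_date(exam_due, course_end, self_paced, proctoring_provider):
    """Mirror of the due-date selection introduced below; returns an ISO string or None."""
    is_instructor_paced = not self_paced
    if proctoring_provider == 'lti_external':
        # LTI-backed (edx-exams) exams fall back to the course end date when self-paced.
        due_date_source = exam_due if is_instructor_paced else course_end
    else:
        # edx-proctoring exams get no due date at all when the course is self-paced.
        due_date_source = exam_due if is_instructor_paced else None
    return due_date_source.isoformat() if due_date_source else None

# A self-paced course on the LTI provider, with no subsection due date,
# inherits the course end date:
assert resolve_exam_due_date(
    None, datetime(2035, 1, 1, tzinfo=timezone.utc), True, 'lti_external'
) == '2035-01-01T00:00:00+00:00'
```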
+ is_instructor_paced = not course.self_paced if course.proctoring_provider == 'lti_external': - due_date = ( - timed_exam.due.isoformat() if timed_exam.due - else (course.end.isoformat() if course.end else None) - ) + due_date_source = timed_exam.due if is_instructor_paced else course.end else: - due_date = timed_exam.due if not course.self_paced else None + due_date_source = timed_exam.due if is_instructor_paced else None + + due_date = due_date_source.isoformat() if due_date_source else None exams_list.append({ 'course_id': str(course_key), diff --git a/cms/djangoapps/contentstore/helpers.py b/cms/djangoapps/contentstore/helpers.py index 2bdbabc7df8c..91236a4dade9 100644 --- a/cms/djangoapps/contentstore/helpers.py +++ b/cms/djangoapps/contentstore/helpers.py @@ -745,10 +745,10 @@ def _import_file_into_course( if thumbnail_content is not None: content.thumbnail_location = thumbnail_location contentstore().save(content) - return True, {clipboard_file_path: f"static/{import_path}"} + return True, {clipboard_file_path: filename if not import_path else f"static/{import_path}"} elif current_file.content_digest == file_data_obj.md5_hash: - # The file already exists and matches exactly, so no action is needed except substitutions - return None, {clipboard_file_path: f"static/{import_path}"} + # The file already exists and matches exactly, so no action is needed + return None, {} else: # There is a conflict with some other file that has the same name. return False, {} diff --git a/cms/djangoapps/contentstore/rest_api/v1/serializers/course_waffle_flags.py b/cms/djangoapps/contentstore/rest_api/v1/serializers/course_waffle_flags.py index 3efb7b6226d4..fde90163f803 100644 --- a/cms/djangoapps/contentstore/rest_api/v1/serializers/course_waffle_flags.py +++ b/cms/djangoapps/contentstore/rest_api/v1/serializers/course_waffle_flags.py @@ -11,6 +11,7 @@ class CourseWaffleFlagsSerializer(serializers.Serializer): """ Serializer for course waffle flags """ + use_new_home_page = serializers.SerializerMethodField() use_new_custom_pages = serializers.SerializerMethodField() use_new_schedule_details_page = serializers.SerializerMethodField() @@ -31,6 +32,7 @@ class CourseWaffleFlagsSerializer(serializers.Serializer): use_react_markdown_editor = serializers.SerializerMethodField() use_video_gallery_flow = serializers.SerializerMethodField() enable_course_optimizer_check_prev_run_links = serializers.SerializerMethodField() + enable_unit_expanded_view = serializers.SerializerMethodField() def get_course_key(self): """ @@ -175,3 +177,10 @@ def get_enable_course_optimizer_check_prev_run_links(self, obj): """ course_key = self.get_course_key() return toggles.enable_course_optimizer_check_prev_run_links(course_key) + + def get_enable_unit_expanded_view(self, obj): + """ + Method to get the enable_unit_expanded_view waffle flag + """ + course_key = self.get_course_key() + return toggles.enable_unit_expanded_view(course_key) diff --git a/cms/djangoapps/contentstore/rest_api/v1/urls.py b/cms/djangoapps/contentstore/rest_api/v1/urls.py index 685a81d778ce..8a94f0b0e040 100644 --- a/cms/djangoapps/contentstore/rest_api/v1/urls.py +++ b/cms/djangoapps/contentstore/rest_api/v1/urls.py @@ -25,6 +25,7 @@ HomePageView, ProctoredExamSettingsView, ProctoringErrorsView, + UnitComponentsView, VideoDownloadView, VideoUsageView, vertical_container_children_redirect_view, @@ -144,6 +145,11 @@ CourseWaffleFlagsView.as_view(), name="course_waffle_flags" ), + re_path( + fr'^unit_handler/{settings.USAGE_KEY_PATTERN}$', + 
UnitComponentsView.as_view(), + name="unit_components" + ), # Authoring API # Do not use under v1 yet (Nov. 23). The Authoring API is still experimental and the v0 versions should be used diff --git a/cms/djangoapps/contentstore/rest_api/v1/views/__init__.py b/cms/djangoapps/contentstore/rest_api/v1/views/__init__.py index d4fcfd5f2e3f..25c0157904e9 100644 --- a/cms/djangoapps/contentstore/rest_api/v1/views/__init__.py +++ b/cms/djangoapps/contentstore/rest_api/v1/views/__init__.py @@ -14,9 +14,6 @@ from .proctoring import ProctoredExamSettingsView, ProctoringErrorsView from .settings import CourseSettingsView from .textbooks import CourseTextbooksView +from .unit_handler import UnitComponentsView from .vertical_block import ContainerHandlerView, vertical_container_children_redirect_view -from .videos import ( - CourseVideosView, - VideoDownloadView, - VideoUsageView, -) +from .videos import CourseVideosView, VideoDownloadView, VideoUsageView diff --git a/cms/djangoapps/contentstore/rest_api/v1/views/tests/test_course_waffle_flags.py b/cms/djangoapps/contentstore/rest_api/v1/views/tests/test_course_waffle_flags.py index f45cc48810d6..a788ce4af3b5 100644 --- a/cms/djangoapps/contentstore/rest_api/v1/views/tests/test_course_waffle_flags.py +++ b/cms/djangoapps/contentstore/rest_api/v1/views/tests/test_course_waffle_flags.py @@ -38,6 +38,7 @@ class CourseWaffleFlagsViewTest(CourseTestCase): "use_react_markdown_editor": False, "use_video_gallery_flow": False, "enable_course_optimizer_check_prev_run_links": False, + "enable_unit_expanded_view": False, } def setUp(self): diff --git a/cms/djangoapps/contentstore/rest_api/v1/views/unit_handler.py b/cms/djangoapps/contentstore/rest_api/v1/views/unit_handler.py new file mode 100644 index 000000000000..18a2376b3922 --- /dev/null +++ b/cms/djangoapps/contentstore/rest_api/v1/views/unit_handler.py @@ -0,0 +1,135 @@ +"""API Views for unit components handler""" + +import logging + +import edx_api_doc_tools as apidocs +from django.http import HttpResponseBadRequest, HttpResponseForbidden +from opaque_keys.edx.keys import UsageKey +from rest_framework.request import Request +from rest_framework.response import Response +from rest_framework.views import APIView + +from cms.djangoapps.contentstore.rest_api.v1.mixins import ContainerHandlerMixin +from cms.djangoapps.contentstore.toggles import enable_unit_expanded_view +from openedx.core.lib.api.view_utils import view_auth_classes +from xmodule.modulestore.django import modulestore +from xmodule.modulestore.exceptions import ItemNotFoundError + +log = logging.getLogger(__name__) + + +@view_auth_classes(is_authenticated=True) +class UnitComponentsView(APIView, ContainerHandlerMixin): + """ + View to get all components in a unit by usage key. + """ + + @apidocs.schema( + parameters=[ + apidocs.string_parameter( + "usage_key_string", + apidocs.ParameterLocation.PATH, + description="Unit usage key", + ), + ], + responses={ + 200: "List of components in the unit", + 400: "Invalid usage key or unit not found.", + 401: "The requester is not authenticated.", + 404: "The requested unit does not exist.", + }, + ) + def get(self, request: Request, usage_key_string: str): + """ + Get all components in a unit. + + **Example Request** + + GET /api/contentstore/v1/unit_handler/{usage_key_string} + + **Response Values** + + If the request is successful, an HTTP 200 "OK" response is returned. 
+ + The HTTP 200 response contains a dict with a list of all components + in the unit, including their display names, block types, and block IDs. + + **Example Response** + + ```json + { + "unit_id": "block-v1:edX+DemoX+Demo_Course+type@vertical+block@vertical_id", + "display_name": "My Unit", + "components": [ + { + "block_id": "block-v1:edX+DemoX+Demo_Course+type@video+block@video_id", + "block_type": "video", + "display_name": "Introduction Video" + }, + { + "block_id": "block-v1:edX+DemoX+Demo_Course+type@html+block@html_id", + "block_type": "html", + "display_name": "Text Content" + }, + { + "block_id": "block-v1:edX+DemoX+Demo_Course+type@problem+block@problem_id", + "block_type": "problem", + "display_name": "Practice Problem" + } + ] + } + ``` + """ + try: + usage_key = UsageKey.from_string(usage_key_string) + except Exception as e: # pylint: disable=broad-exception-caught + log.error(f"Invalid usage key: {usage_key_string}, error: {str(e)}") + return HttpResponseBadRequest("Invalid usage key format") + + try: + # Get the unit xblock + unit_xblock = modulestore().get_item(usage_key) + + # Verify it's a vertical (unit) + if unit_xblock.category != "vertical": + return HttpResponseBadRequest( + "The provided usage key is not a unit (vertical)" + ) + + if not enable_unit_expanded_view(unit_xblock.location.course_key): + return HttpResponseForbidden( + "Unit expanded view is disabled for this course" + ) + + components = [] + + # Get all children (components) of the unit + if unit_xblock.has_children: + for child_usage_key in unit_xblock.children: + try: + child_xblock = modulestore().get_item(child_usage_key) + components.append( + { + "block_id": str(child_xblock.location), + "block_type": child_xblock.category, + "display_name": child_xblock.display_name_with_default, + } + ) + except ItemNotFoundError: + log.warning(f"Child block not found: {child_usage_key}") + continue + + response_data = { + "unit_id": str(usage_key), + "display_name": unit_xblock.display_name_with_default, + "components": components, + } + + return Response(response_data) + + except ItemNotFoundError: + log.error(f"Unit not found: {usage_key_string}") + return HttpResponseBadRequest("Unit not found") + except Exception as e: # pylint: disable=broad-exception-caught + log.error(f"Error retrieving unit components: {str(e)}") + return HttpResponseBadRequest(f"Error retrieving unit components: {str(e)}") diff --git a/cms/djangoapps/contentstore/tests/test_course_listing.py b/cms/djangoapps/contentstore/tests/test_course_listing.py index e46b493b7b39..a2b6f07d15ef 100644 --- a/cms/djangoapps/contentstore/tests/test_course_listing.py +++ b/cms/djangoapps/contentstore/tests/test_course_listing.py @@ -24,8 +24,10 @@ get_courses_accessible_to_user ) from common.djangoapps.course_action_state.models import CourseRerunState +from common.djangoapps.student.models.user import CourseAccessRole from common.djangoapps.student.roles import ( CourseInstructorRole, + CourseLimitedStaffRole, CourseStaffRole, GlobalStaff, OrgInstructorRole, @@ -188,6 +190,48 @@ def test_staff_course_listing(self): with self.assertNumQueries(2): list(_accessible_courses_summary_iter(self.request)) + def test_course_limited_staff_course_listing(self): + # Setup a new course + course_location = self.store.make_course_key('Org', 'CreatedCourse', 'Run') + CourseFactory.create( + org=course_location.org, + number=course_location.course, + run=course_location.run + ) + course = CourseOverviewFactory.create(id=course_location, org=course_location.org) + + 
# Add the user as a course_limited_staff on the course + CourseLimitedStaffRole(course.id).add_users(self.user) + self.assertTrue(CourseLimitedStaffRole(course.id).has_user(self.user)) + + # Fetch accessible courses list & verify their count + courses_list_by_staff, __ = get_courses_accessible_to_user(self.request) + + # Limited Course Staff should not be able to list courses in Studio + assert len(list(courses_list_by_staff)) == 0 + + def test_org_limited_staff_course_listing(self): + + # Setup a new course + course_location = self.store.make_course_key('Org', 'CreatedCourse', 'Run') + CourseFactory.create( + org=course_location.org, + number=course_location.course, + run=course_location.run + ) + course = CourseOverviewFactory.create(id=course_location, org=course_location.org) + + # Add a user as course_limited_staff on the org + # This is not possible using the course roles classes but is possible via Django admin so we + # insert a row into the model directly to test that scenario. + CourseAccessRole.objects.create(user=self.user, org=course_location.org, role=CourseLimitedStaffRole.ROLE) + + # Fetch accessible courses list & verify their count + courses_list_by_staff, __ = get_courses_accessible_to_user(self.request) + + # Limited Course Staff should not be able to list courses in Studio + assert len(list(courses_list_by_staff)) == 0 + def test_get_course_list_with_invalid_course_location(self): """ Test getting courses with invalid course location (course deleted from modulestore). diff --git a/cms/djangoapps/contentstore/tests/test_exams.py b/cms/djangoapps/contentstore/tests/test_exams.py index 798b5e51fd80..823038957714 100644 --- a/cms/djangoapps/contentstore/tests/test_exams.py +++ b/cms/djangoapps/contentstore/tests/test_exams.py @@ -65,16 +65,21 @@ def _get_exam_due_date(self, course, sequential): Return the expected exam due date for the exam, based on the selected course proctoring provider and the exam due date or the course end date. + This is a copy of the due date computation logic in register_exams function. + Arguments: * course: the course that the exam subsection is in; may have a course.end attribute * sequential: the exam subsection; may have a sequential.due attribute """ + is_instructor_paced = not course.self_paced if course.proctoring_provider == 'lti_external': - return sequential.due.isoformat() if sequential.due else (course.end.isoformat() if course.end else None) - elif course.self_paced: - return None + due_date_source = sequential.due if is_instructor_paced else course.end else: - return sequential.due + due_date_source = sequential.due if is_instructor_paced else None + + due_date = due_date_source.isoformat() if due_date_source else None + + return due_date @ddt.data(*(tuple(base) + (extra,) for base, extra in itertools.product( [ @@ -185,14 +190,13 @@ def test_feature_flag_off(self, mock_patch_course_exams): def test_no_due_dates(self, is_self_paced, course_end_date, proctoring_provider, mock_patch_course_exams): """ Test that the the correct due date is registered for the exam when the subsection does not have a due date, - depending on the proctoring provider. + depending on the proctoring provider and course pacing type. * lti_external - * The course end date is registered as the due date when the subsection does not have a due date for both - self-paced and instructor-paced exams. + * If the course is instructor-paced, the exam due date is the subsection due date if it exists, else None. 
+ * If the course is self-paced, the exam due date is the course end date if it exists, else None. * not lti_external - * None is registered as the due date when the subsection does not have a due date for both - self-paced and instructor-paced exams. + * The exam due date is always the subsection due date if it exists, else None. """ self.course.self_paced = is_self_paced self.course.end = course_end_date @@ -222,25 +226,17 @@ def test_no_due_dates(self, is_self_paced, course_end_date, proctoring_provider, @ddt.data(*itertools.product((True, False), ('lti_external', 'null'))) @ddt.unpack @freeze_time('2024-01-01') - def test_subsection_due_date_prioritized(self, is_self_paced, proctoring_provider, mock_patch_course_exams): + def test_subsection_due_date_prioritized_instructor_paced( + self, + is_self_paced, + proctoring_provider, + mock_patch_course_exams + ): """ - Test that the subsection due date is registered as the due date when both the subsection has a due date and the - course has an end date for both self-paced and instructor-paced exams. - - Test that the the correct due date is registered for the exam when the subsection has a due date, depending on - the proctoring provider. - - * lti_external - * The subsection due date is registered as the due date when both the subsection has a due date and the - course has an end date for both self-paced and instructor-paced exams - * not lti_external - * None is registered as the due date when both the subsection has a due date and the course has an end date - for self-paced exams. - * The subsection due date is registered as the due date when both the subsection has a due date and the - course has an end date for instructor-paced exams. + Test that exam due date is computed correctly. """ self.course.self_paced = is_self_paced - self.course.end = datetime(2035, 1, 1, 0, 0) + self.course.end = datetime(2035, 1, 1, 0, 0, tzinfo=timezone.utc) self.course.proctoring_provider = proctoring_provider self.course = self.update_course(self.course, 1) @@ -260,7 +256,7 @@ def test_subsection_due_date_prioritized(self, is_self_paced, proctoring_provide ) listen_for_course_publish(self, self.course.id) - called_exams, called_course = mock_patch_course_exams.call_args[0] + called_exams, _ = mock_patch_course_exams.call_args[0] expected_due_date = self._get_exam_due_date(self.course, sequence) diff --git a/cms/djangoapps/contentstore/toggles.py b/cms/djangoapps/contentstore/toggles.py index c287f8c4dbec..c3dba4a6f4a4 100644 --- a/cms/djangoapps/contentstore/toggles.py +++ b/cms/djangoapps/contentstore/toggles.py @@ -682,3 +682,23 @@ def enable_course_optimizer_check_prev_run_links(course_key): Returns a boolean if previous run course optimizer feature is enabled for the given course. """ return ENABLE_COURSE_OPTIMIZER_CHECK_PREV_RUN_LINKS.is_enabled(course_key) + + +# .. toggle_name: contentstore.enable_unit_expanded_view +# .. toggle_implementation: CourseWaffleFlag +# .. toggle_default: False +# .. toggle_description: When enabled, the Unit Expanded View feature in the Course Outline is activated. +# .. toggle_use_cases: temporary +# .. toggle_creation_date: 2026-01-01 +# .. toggle_target_removal_date: 2026-06-01 +# .. toggle_tickets: TNL2-473 +ENABLE_UNIT_EXPANDED_VIEW = CourseWaffleFlag( + f"{CONTENTSTORE_NAMESPACE}.enable_unit_expanded_view", __name__ +) + + +def enable_unit_expanded_view(course_key): + """ + Returns a boolean if the Unit Expanded View feature is enabled for the given course. 
+ """ + return ENABLE_UNIT_EXPANDED_VIEW.is_enabled(course_key) diff --git a/cms/djangoapps/contentstore/views/component.py b/cms/djangoapps/contentstore/views/component.py index 34c1f465c566..82db51f958fe 100644 --- a/cms/djangoapps/contentstore/views/component.py +++ b/cms/djangoapps/contentstore/views/component.py @@ -43,6 +43,12 @@ from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order from xmodule.modulestore.exceptions import ItemNotFoundError # lint-amnesty, pylint: disable=wrong-import-order +try: + from games.toggles import is_games_xblock_enabled # pylint: disable=import-error +except ImportError: + def is_games_xblock_enabled(): + return False + __all__ = [ 'container_handler', 'component_handler', @@ -66,6 +72,10 @@ BETA_COMPONENT_TYPES = ['library_v2', 'itembank'] +if is_games_xblock_enabled(): + COMPONENT_TYPES.append('games') + BETA_COMPONENT_TYPES.append('games') + ADVANCED_COMPONENT_TYPES = sorted({name for name, class_ in XBlock.load_classes()} - set(COMPONENT_TYPES)) ADVANCED_PROBLEM_TYPES = settings.ADVANCED_PROBLEM_TYPES @@ -288,6 +298,8 @@ def create_support_legend_dict(): 'itembank': _("Problem Bank"), 'drag-and-drop-v2': _("Drag and Drop"), } + if is_games_xblock_enabled(): + component_display_names['games'] = _("Games") component_templates = [] categories = set() diff --git a/cms/djangoapps/contentstore/views/course.py b/cms/djangoapps/contentstore/views/course.py index fa8769dc0cb9..a93a35cf1d3c 100644 --- a/cms/djangoapps/contentstore/views/course.py +++ b/cms/djangoapps/contentstore/views/course.py @@ -56,6 +56,7 @@ GlobalStaff, UserBasedRole, OrgStaffRole, + strict_role_checking, ) from common.djangoapps.util.json_request import JsonResponse, JsonResponseBadRequest, expect_json from common.djangoapps.util.string_utils import _has_non_ascii_characters @@ -536,7 +537,9 @@ def filter_ccx(course_access): return not isinstance(course_access.course_id, CCXLocator) instructor_courses = UserBasedRole(request.user, CourseInstructorRole.ROLE).courses_with_role() - staff_courses = UserBasedRole(request.user, CourseStaffRole.ROLE).courses_with_role() + with strict_role_checking(): + staff_courses = UserBasedRole(request.user, CourseStaffRole.ROLE).courses_with_role() + all_courses = list(filter(filter_ccx, instructor_courses | staff_courses)) courses_list = [] course_keys = {} diff --git a/cms/static/images/large-games-icon.svg b/cms/static/images/large-games-icon.svg new file mode 100644 index 000000000000..9c862ef6c194 --- /dev/null +++ b/cms/static/images/large-games-icon.svg @@ -0,0 +1,10 @@ + diff --git a/cms/static/js/views/pages/container.js b/cms/static/js/views/pages/container.js index d50f6b4bbe4a..88a0f3397f2a 100644 --- a/cms/static/js/views/pages/container.js +++ b/cms/static/js/views/pages/container.js @@ -301,6 +301,12 @@ function($, _, Backbone, gettext, BasePage, this.xblockView.refresh(xblockView, block_added, is_duplicate); // Update publish and last modified information from the server. 
this.model.fetch(); + + // Auto-open editor for games blocks when first added + if (block_added && !is_duplicate && xblockView && xblockView.$el && + xblockView.$el.find('.xblock-header-primary').attr('data-block-type') === 'games') { + setTimeout(function() { xblockView.$el.find('.edit-button').first().trigger('click'); }, 500); + } }, renderAddXBlockComponents: function() { @@ -512,6 +518,7 @@ function($, _, Backbone, gettext, BasePage, if((useNewTextEditor === 'True' && blockType === 'html') || (useNewVideoEditor === 'True' && blockType === 'video') || (useNewProblemEditor === 'True' && blockType === 'problem') + || (blockType === 'games') ) { var destinationUrl = primaryHeader.attr('authoring_MFE_base_url') + '/' + blockType diff --git a/cms/static/sass/assets/_graphics.scss b/cms/static/sass/assets/_graphics.scss index afb830d5dd71..58141c445a75 100644 --- a/cms/static/sass/assets/_graphics.scss +++ b/cms/static/sass/assets/_graphics.scss @@ -80,3 +80,9 @@ height: ($baseline*3); background: url('#{$static-path}/images/large-itembank-icon.png') center no-repeat; } + +.large-games-icon { + display: inline-block; + width: ($baseline*3); + height: ($baseline*3); + background: url('#{$static-path}/images/large-games-icon.svg') center no-repeat; } diff --git a/cms/static/sass/elements/_modules.scss b/cms/static/sass/elements/_modules.scss index 1e9f52691fc8..f1c871ba1d95 100644 --- a/cms/static/sass/elements/_modules.scss +++ b/cms/static/sass/elements/_modules.scss @@ -163,8 +163,8 @@ display: inline-block; color: $uxpl-green-base; - background-color: theme-color("inverse"); - border-color: theme-color("inverse"); + background-color: $white; + border-color: $white; border-radius: 3px; font-size: 90%; diff --git a/cms/templates/js/show-correctness-editor.underscore b/cms/templates/js/show-correctness-editor.underscore index 3db6c3c27a5c..1b0dd896747a 100644 --- a/cms/templates/js/show-correctness-editor.underscore +++ b/cms/templates/js/show-correctness-editor.underscore @@ -35,6 +35,13 @@ <% } %> <%- gettext('If the subsection does not have a due date, learners always see their scores when they submit answers to assessments.') %>
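Ahead of the student/auth.py and views/course.py hunks below: both wrap course-staff role lookups in strict_role_checking(), and the new tests cover a "limited_staff" CourseAccessRole row scoped to an org (insertable only via Django admin). A hedged sketch of the usage pattern — the helper function is hypothetical, and the semantics of strict_role_checking (suppressing the org-level fallback during the check) are inferred from the accompanying tests rather than stated in this diff:

```python
from common.djangoapps.student.roles import CourseStaffRole, strict_role_checking

def user_is_course_staff(user, course_key):
    # Hypothetical helper: inside the context manager, only an explicit
    # course-level staff role should grant access; an org-scoped
    # limited_staff row inserted through Django admin should not.
    with strict_role_checking():
        return CourseStaffRole(course_key).has_user(user)
```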
+ + diff --git a/common/djangoapps/student/auth.py b/common/djangoapps/student/auth.py index e199142fe377..047f0174a062 100644 --- a/common/djangoapps/student/auth.py +++ b/common/djangoapps/student/auth.py @@ -24,6 +24,7 @@ OrgInstructorRole, OrgLibraryUserRole, OrgStaffRole, + strict_role_checking, ) # Studio permissions: @@ -115,8 +116,9 @@ def get_user_permissions(user, course_key, org=None, service_variant=None): return STUDIO_NO_PERMISSIONS # Staff have all permissions except EDIT_ROLES: - if OrgStaffRole(org=org).has_user(user) or (course_key and user_has_role(user, CourseStaffRole(course_key))): - return STUDIO_VIEW_USERS | STUDIO_EDIT_CONTENT | STUDIO_VIEW_CONTENT + with strict_role_checking(): + if OrgStaffRole(org=org).has_user(user) or (course_key and user_has_role(user, CourseStaffRole(course_key))): + return STUDIO_VIEW_USERS | STUDIO_EDIT_CONTENT | STUDIO_VIEW_CONTENT # Otherwise, for libraries, users can view only: if course_key and isinstance(course_key, LibraryLocator): diff --git a/common/djangoapps/student/tests/test_authz.py b/common/djangoapps/student/tests/test_authz.py index c0b88e6318b5..70636e04b68a 100644 --- a/common/djangoapps/student/tests/test_authz.py +++ b/common/djangoapps/student/tests/test_authz.py @@ -11,6 +11,7 @@ from django.test import TestCase, override_settings from opaque_keys.edx.locator import CourseLocator +from common.djangoapps.student.models.user import CourseAccessRole from common.djangoapps.student.auth import ( add_users, has_studio_read_access, @@ -305,6 +306,23 @@ def test_limited_staff_no_studio_access_cms(self): assert not has_studio_read_access(self.limited_staff, self.course_key) assert not has_studio_write_access(self.limited_staff, self.course_key) + @override_settings(SERVICE_VARIANT='cms') + def test_limited_org_staff_no_studio_access_cms(self): + """ + Verifies that course limited staff have no read and no write access when SERVICE_VARIANT is not 'lms'. + """ + # Add a user as course_limited_staff on the org + # This is not possible using the course roles classes but is possible via Django admin so we + # insert a row into the model directly to test that scenario. 
+ CourseAccessRole.objects.create( + user=self.limited_staff, + org=self.course_key.org, + role=CourseLimitedStaffRole.ROLE, + ) + + assert not has_studio_read_access(self.limited_staff, self.course_key) + assert not has_studio_write_access(self.limited_staff, self.course_key) + class CourseOrgGroupTest(TestCase): """ diff --git a/common/djangoapps/third_party_auth/api/serializers.py b/common/djangoapps/third_party_auth/api/serializers.py index 3e8513de7312..a510cbe07a07 100644 --- a/common/djangoapps/third_party_auth/api/serializers.py +++ b/common/djangoapps/third_party_auth/api/serializers.py @@ -20,4 +20,7 @@ def get_username(self, social_user): def get_remote_id(self, social_user): """ Gets remote id from social user based on provider """ + remote_id_field_name = self.context.get('remote_id_field_name', None) + if remote_id_field_name: + return self.provider.get_remote_id_from_field_name(social_user, remote_id_field_name) return self.provider.get_remote_id_from_social_auth(social_user) diff --git a/common/djangoapps/third_party_auth/api/tests/test_views.py b/common/djangoapps/third_party_auth/api/tests/test_views.py index f7834001d66b..61740268db90 100644 --- a/common/djangoapps/third_party_auth/api/tests/test_views.py +++ b/common/djangoapps/third_party_auth/api/tests/test_views.py @@ -38,8 +38,10 @@ PASSWORD = "edx" -def get_mapping_data_by_usernames(usernames): +def get_mapping_data_by_usernames(usernames, remote_id_field_name=False): """ Generate mapping data used in response """ + if remote_id_field_name: + return [{'username': username, 'remote_id': 'external_' + username} for username in usernames] return [{'username': username, 'remote_id': 'remote_' + username} for username in usernames] @@ -76,11 +78,13 @@ def setUp(self): # pylint: disable=arguments-differ provider=google.backend_name, uid=f'{username}@gmail.com', ) - UserSocialAuth.objects.create( + usa = UserSocialAuth.objects.create( user=user, provider=testshib.backend_name, uid=f'{testshib.slug}:remote_{username}', ) + usa.set_extra_data({'external_user_id': f'external_{username}'}) + usa.refresh_from_db() # Create another user not linked to any providers: UserFactory.create(username=CARL_USERNAME, email=f'{CARL_USERNAME}@example.com', password=PASSWORD) @@ -304,12 +308,20 @@ def test_list_all_user_mappings_oauth2(self, valid_call, expect_code, expect_dat @ddt.data( ({'username': [ALICE_USERNAME, STAFF_USERNAME]}, 200, get_mapping_data_by_usernames([ALICE_USERNAME, STAFF_USERNAME])), + ({'username': [ALICE_USERNAME, STAFF_USERNAME], 'remote_id_field_name': 'external_user_id'}, 200, + get_mapping_data_by_usernames([ALICE_USERNAME, STAFF_USERNAME], remote_id_field_name=True)), ({'remote_id': ['remote_' + ALICE_USERNAME, 'remote_' + STAFF_USERNAME, 'remote_' + CARL_USERNAME]}, 200, get_mapping_data_by_usernames([ALICE_USERNAME, STAFF_USERNAME])), + ({'remote_id': ['remote_' + ALICE_USERNAME, 'remote_' + STAFF_USERNAME, 'remote_' + CARL_USERNAME], + 'remote_id_field_name': 'external_user_id'}, 200, + get_mapping_data_by_usernames([ALICE_USERNAME, STAFF_USERNAME], remote_id_field_name=True)), ({'username': [ALICE_USERNAME, CARL_USERNAME, STAFF_USERNAME]}, 200, get_mapping_data_by_usernames([ALICE_USERNAME, STAFF_USERNAME])), ({'username': [ALICE_USERNAME], 'remote_id': ['remote_' + STAFF_USERNAME]}, 200, get_mapping_data_by_usernames([ALICE_USERNAME, STAFF_USERNAME])), + ({'username': [ALICE_USERNAME], 'remote_id': ['remote_' + STAFF_USERNAME], + 'remote_id_field_name': 'external_user_id'}, 200, + 
get_mapping_data_by_usernames([ALICE_USERNAME, STAFF_USERNAME], remote_id_field_name=True)), ) @ddt.unpack def test_user_mappings_with_query_params_comma_separated(self, query_params, expect_code, expect_data): @@ -321,6 +333,8 @@ def test_user_mappings_with_query_params_comma_separated(self, query_params, exp for attr in ['username', 'remote_id']: if attr in query_params: params.append('{}={}'.format(attr, ','.join(query_params[attr]))) + if 'remote_id_field_name' in query_params: + params.append('remote_id_field_name={}'.format(query_params['remote_id_field_name'])) url = "{}?{}".format(base_url, '&'.join(params)) response = self.client.get(url, HTTP_X_EDX_API_KEY=VALID_API_KEY) self._verify_response(response, expect_code, expect_data) @@ -328,12 +342,20 @@ def test_user_mappings_with_query_params_comma_separated(self, query_params, exp @ddt.data( ({'username': [ALICE_USERNAME, STAFF_USERNAME]}, 200, get_mapping_data_by_usernames([ALICE_USERNAME, STAFF_USERNAME])), + ({'username': [ALICE_USERNAME, STAFF_USERNAME], 'remote_id_field_name': 'external_user_id'}, 200, + get_mapping_data_by_usernames([ALICE_USERNAME, STAFF_USERNAME], remote_id_field_name=True)), ({'remote_id': ['remote_' + ALICE_USERNAME, 'remote_' + STAFF_USERNAME, 'remote_' + CARL_USERNAME]}, 200, get_mapping_data_by_usernames([ALICE_USERNAME, STAFF_USERNAME])), + ({'remote_id': ['remote_' + ALICE_USERNAME, 'remote_' + STAFF_USERNAME, 'remote_' + CARL_USERNAME], + 'remote_id_field_name': 'external_user_id'}, 200, + get_mapping_data_by_usernames([ALICE_USERNAME, STAFF_USERNAME], remote_id_field_name=True)), ({'username': [ALICE_USERNAME, CARL_USERNAME, STAFF_USERNAME]}, 200, get_mapping_data_by_usernames([ALICE_USERNAME, STAFF_USERNAME])), ({'username': [ALICE_USERNAME], 'remote_id': ['remote_' + STAFF_USERNAME]}, 200, get_mapping_data_by_usernames([ALICE_USERNAME, STAFF_USERNAME])), + ({'username': [ALICE_USERNAME], 'remote_id': ['remote_' + STAFF_USERNAME], + 'remote_id_field_name': 'external_user_id'}, 200, + get_mapping_data_by_usernames([ALICE_USERNAME, STAFF_USERNAME], remote_id_field_name=True)), ) @ddt.unpack def test_user_mappings_with_query_params_multi_value_key(self, query_params, expect_code, expect_data): @@ -345,6 +367,8 @@ def test_user_mappings_with_query_params_multi_value_key(self, query_params, exp for attr in ['username', 'remote_id']: if attr in query_params: params.setlist(attr, query_params[attr]) + if 'remote_id_field_name' in query_params: + params['remote_id_field_name'] = query_params['remote_id_field_name'] url = f"{base_url}?{params.urlencode()}" response = self.client.get(url, HTTP_X_EDX_API_KEY=VALID_API_KEY) self._verify_response(response, expect_code, expect_data) diff --git a/common/djangoapps/third_party_auth/api/views.py b/common/djangoapps/third_party_auth/api/views.py index c2b8b0dd6f39..89d55e2eecdc 100644 --- a/common/djangoapps/third_party_auth/api/views.py +++ b/common/djangoapps/third_party_auth/api/views.py @@ -323,6 +323,9 @@ class UserMappingView(ListAPIView): GET /api/third_party_auth/v0/providers/{provider_id}/users?username={username1},{username2} + GET /api/third_party_auth/v0/providers/{provider_id}/users?username={username1}& + remote_id_field_name={external_id_field_name} + GET /api/third_party_auth/v0/providers/{provider_id}/users?username={username1}&usernames={username2} GET /api/third_party_auth/v0/providers/{provider_id}/users?remote_id={remote_id1},{remote_id2} @@ -346,6 +349,9 @@ class UserMappingView(ListAPIView): * usernames: Optional. 
List of comma separated edX usernames to filter the result set. e.g. ?usernames=bob123,jane456 + * remote_id_field_name: Optional. The field name to use for the remote id lookup. + Useful when learners are coming from external LMS. e.g. ?remote_id_field_name=ext_userid_sf + * page, page_size: Optional. Used for paging the result set, especially when getting an unfiltered list. @@ -415,6 +421,7 @@ def get_serializer_context(self): remove idp_slug from the remote_id if there is any """ context = super().get_serializer_context() + context['remote_id_field_name'] = self.request.query_params.get('remote_id_field_name', None) context['provider'] = self.provider return context diff --git a/common/djangoapps/third_party_auth/management/commands/saml.py b/common/djangoapps/third_party_auth/management/commands/saml.py index afe369c2ade0..6865ebf69987 100644 --- a/common/djangoapps/third_party_auth/management/commands/saml.py +++ b/common/djangoapps/third_party_auth/management/commands/saml.py @@ -6,7 +6,6 @@ import logging from django.core.management.base import BaseCommand, CommandError -from edx_django_utils.monitoring import set_custom_attribute from common.djangoapps.third_party_auth.tasks import fetch_saml_metadata from common.djangoapps.third_party_auth.models import SAMLProviderConfig, SAMLConfiguration @@ -71,31 +70,28 @@ def _handle_run_checks(self): """ Handle the --run-checks option for checking SAMLProviderConfig configuration issues. - This is a report-only command. It identifies potential configuration problems such as: - - Outdated SAMLConfiguration references (provider pointing to old config version) - - Site ID mismatches between SAMLProviderConfig and its SAMLConfiguration - - Slug mismatches (except 'default' slugs) # noqa: E501 - - SAMLProviderConfig objects with null SAMLConfiguration references (informational) - - Includes observability attributes for monitoring. + This is a report-only command that identifies potential configuration problems. """ - # Set custom attributes for monitoring the check operation - # .. custom_attribute_name: saml_management_command.operation - # .. custom_attribute_description: Records current SAML operation ('run_checks'). - set_custom_attribute('saml_management_command.operation', 'run_checks') - metrics = self._check_provider_configurations() self._report_check_summary(metrics) def _check_provider_configurations(self): """ - Check each provider configuration for potential issues. + Check each provider configuration for potential issues: + - Outdated configuration references + - Site ID mismatches + - Missing configurations (no direct config and no default) + - Disabled providers and configurations + Also reports informational data such as slug mismatches. + + See code comments near each log output for possible resolution details. Returns a dictionary of metrics about the found issues. 
""" outdated_count = 0 site_mismatch_count = 0 slug_mismatch_count = 0 null_config_count = 0 + disabled_config_count = 0 error_count = 0 total_providers = 0 @@ -107,53 +103,74 @@ def _check_provider_configurations(self): for provider_config in provider_configs: total_providers += 1 + + # Check if provider is disabled + provider_disabled = not provider_config.enabled + disabled_status = ", enabled=False" if provider_disabled else "" + provider_info = ( - f"Provider (id={provider_config.id}, name={provider_config.name}, " - f"slug={provider_config.slug}, site_id={provider_config.site_id})" + f"Provider (id={provider_config.id}, " + f"name={provider_config.name}, slug={provider_config.slug}, " + f"site_id={provider_config.site_id}{disabled_status})" ) - if not provider_config.saml_configuration: - self.stdout.write( - f"[INFO] {provider_info} has no SAML configuration because " - "a matching default was not found." - ) - null_config_count += 1 - continue + # Provider disabled status is already included in provider_info format try: + if not provider_config.saml_configuration: + null_config_count, disabled_config_count = self._check_no_config( + provider_config, provider_info, null_config_count, disabled_config_count + ) + continue + + # Check if SAML configuration is disabled + if not provider_config.saml_configuration.enabled: + # Resolution: Enable the SAML configuration in Django admin + # or assign a different configuration + self.stdout.write( + f"[WARNING] {provider_info} " + f"has SAML config (id={provider_config.saml_configuration_id}, enabled=False)." + ) + disabled_config_count += 1 + + # Check configuration currency current_config = SAMLConfiguration.current( provider_config.saml_configuration.site_id, provider_config.saml_configuration.slug ) - # Check for outdated configuration references - if current_config: - if current_config.id != provider_config.saml_configuration_id: - self.stdout.write( - f"[WARNING] {provider_info} " - f"has outdated SAML config (id={provider_config.saml_configuration_id} which " - f"should be updated to the current SAML config (id={current_config.id})." - ) - outdated_count += 1 + if current_config and (current_config.id != provider_config.saml_configuration_id): + # Resolution: Update the provider's saml_configuration_id to the current config ID + self.stdout.write( + f"[WARNING] {provider_info} " + f"has outdated SAML config (id={provider_config.saml_configuration_id}) which " + f"should be updated to the current SAML config (id={current_config.id})." + ) + outdated_count += 1 + # Check site ID match if provider_config.saml_configuration.site_id != provider_config.site_id: config_site_id = provider_config.saml_configuration.site_id - provider_site_id = provider_config.site_id + # Resolution: Create a new SAML configuration for the correct site + # or move the provider to the matching site self.stdout.write( f"[WARNING] {provider_info} " - f"SAML config (id={provider_config.saml_configuration_id}, site_id={config_site_id}) " - "does not match the provider's site_id." + f"SAML config (id={provider_config.saml_configuration_id}, " + f"site_id={config_site_id}) does not match the provider's site_id." 
) site_mismatch_count += 1 - saml_configuration_slug = provider_config.saml_configuration.slug - provider_config_slug = provider_config.slug - - if saml_configuration_slug not in (provider_config_slug, 'default'): + # Check slug match + if provider_config.saml_configuration.slug not in (provider_config.slug, 'default'): + config_id = provider_config.saml_configuration_id + saml_configuration_slug = provider_config.saml_configuration.slug + config_disabled_status = ", enabled=False" if not provider_config.saml_configuration.enabled else "" + # Resolution: This is informational only - provider can use + # a different slug configuration self.stdout.write( - f"[WARNING] {provider_info} " - f"SAML config (id={provider_config.saml_configuration_id}, slug='{saml_configuration_slug}') " - "does not match the provider's slug." + f"[INFO] {provider_info} has " + f"SAML config (id={config_id}, slug='{saml_configuration_slug}'{config_disabled_status}) " + "that does not match the provider's slug." ) slug_mismatch_count += 1 @@ -165,41 +182,64 @@ def _check_provider_configurations(self): 'total_providers': {'count': total_providers, 'requires_attention': False}, 'outdated_count': {'count': outdated_count, 'requires_attention': True}, 'site_mismatch_count': {'count': site_mismatch_count, 'requires_attention': True}, - 'slug_mismatch_count': {'count': slug_mismatch_count, 'requires_attention': True}, + 'slug_mismatch_count': {'count': slug_mismatch_count, 'requires_attention': False}, 'null_config_count': {'count': null_config_count, 'requires_attention': False}, + 'disabled_config_count': {'count': disabled_config_count, 'requires_attention': True}, 'error_count': {'count': error_count, 'requires_attention': True}, } - for key, metric_data in metrics.items(): - # .. custom_attribute_name: saml_management_command.{key} - # .. custom_attribute_description: Records metrics from SAML configuration checks. - set_custom_attribute(f'saml_management_command.{key}', metric_data['count']) - return metrics + def _check_no_config(self, provider_config, provider_info, null_config_count, disabled_config_count): + """Helper to check providers with no direct SAML configuration.""" + default_config = SAMLConfiguration.current(provider_config.site_id, 'default') + if not default_config or default_config.id is None: + # Resolution: Create/Link a SAML configuration for this provider + # or create/link a default configuration for the site + self.stdout.write( + f"[WARNING] {provider_info} has no direct SAML configuration and " + "no matching default configuration was found." + ) + null_config_count += 1 + + elif not default_config.enabled: + # Resolution: Enable the provider's linked SAML configuration + # or create/link a specific configuration for this provider + self.stdout.write( + f"[WARNING] {provider_info} has no direct SAML configuration and " + f"the default configuration (id={default_config.id}, enabled=False)." + ) + disabled_config_count += 1 + + return null_config_count, disabled_config_count + def _report_check_summary(self, metrics): """ - Print a summary of the check results and set the total_requiring_attention custom attribute. + Print a summary of the check results. """ total_requiring_attention = sum( metric_data['count'] for metric_data in metrics.values() if metric_data['requires_attention'] ) - # .. custom_attribute_name: saml_management_command.total_requiring_attention - # .. custom_attribute_description: The total number of configuration issues requiring attention. 
- set_custom_attribute('saml_management_command.total_requiring_attention', total_requiring_attention) - self.stdout.write(self.style.SUCCESS("CHECK SUMMARY:")) self.stdout.write(f" Providers checked: {metrics['total_providers']['count']}") - self.stdout.write(f" Null configs: {metrics['null_config_count']['count']}") + self.stdout.write("") + + # Informational only section + self.stdout.write("Informational only:") + self.stdout.write(f" Slug mismatches: {metrics['slug_mismatch_count']['count']}") + self.stdout.write(f" Missing configs: {metrics['null_config_count']['count']}") + self.stdout.write("") + # Issues requiring attention section if total_requiring_attention > 0: - self.stdout.write("\nIssues requiring attention:") + self.stdout.write("Issues requiring attention:") self.stdout.write(f" Outdated: {metrics['outdated_count']['count']}") self.stdout.write(f" Site mismatches: {metrics['site_mismatch_count']['count']}") - self.stdout.write(f" Slug mismatches: {metrics['slug_mismatch_count']['count']}") + self.stdout.write(f" Disabled configs: {metrics['disabled_config_count']['count']}") self.stdout.write(f" Errors: {metrics['error_count']['count']}") - self.stdout.write(f"\nTotal issues requiring attention: {total_requiring_attention}") + self.stdout.write("") + self.stdout.write(f"Total issues requiring attention: {total_requiring_attention}") else: - self.stdout.write(self.style.SUCCESS("\nNo configuration issues found!")) + self.stdout.write(self.style.SUCCESS("No configuration issues found!")) diff --git a/common/djangoapps/third_party_auth/management/commands/tests/test_saml.py b/common/djangoapps/third_party_auth/management/commands/tests/test_saml.py index 6963d5dcd0d5..d80c9146664b 100644 --- a/common/djangoapps/third_party_auth/management/commands/tests/test_saml.py +++ b/common/djangoapps/third_party_auth/management/commands/tests/test_saml.py @@ -79,6 +79,7 @@ def setUp(self): name='TestShib College', entity_id='https://idp.testshib.org/idp/shibboleth', metadata_source='https://www.testshib.org/metadata/testshib-providers.xml', + saml_configuration=self.saml_config, ) def _setup_test_configs_for_run_checks(self): @@ -337,8 +338,30 @@ def _run_checks_command(self): call_command('saml', '--run-checks', stdout=out) return out.getvalue() - @mock.patch('common.djangoapps.third_party_auth.management.commands.saml.set_custom_attribute') - def test_run_checks_outdated_configs(self, mock_set_custom_attribute): + def test_run_checks_setup_test_data(self): + """ + Test the --run-checks command against initial setup test data. + + This test validates that the base setup data (from setUp) is correctly + identified as having configuration issues. The setup includes a provider + (self.provider_config) with a disabled SAML configuration (self.saml_config), + which is reported as a disabled config issue (not a missing config). + """ + output = self._run_checks_command() + + # The setup data includes a provider with a disabled SAML config + expected_warning = ( + f'[WARNING] Provider (id={self.provider_config.id}, ' + f'name={self.provider_config.name}, ' + f'slug={self.provider_config.slug}, ' + f'site_id={self.provider_config.site_id}) ' + f'has SAML config (id={self.saml_config.id}, enabled=False).' 
+ ) + self.assertIn(expected_warning, output) + self.assertIn('Missing configs: 0', output) # No missing configs from setUp + self.assertIn('Disabled configs: 1', output) # From setUp: provider_config with disabled saml_config + + def test_run_checks_outdated_configs(self): """ Test the --run-checks command identifies outdated configurations. """ @@ -346,31 +369,18 @@ def test_run_checks_outdated_configs(self, mock_set_custom_attribute): output = self._run_checks_command() - self.assertIn('[WARNING]', output) - self.assertIn('test-provider', output) - self.assertIn( - f'id={old_config.id} which should be updated to the current SAML config (id={new_config.id})', - output + expected_warning = ( + f'[WARNING] Provider (id={test_provider_config.id}, name={test_provider_config.name}, ' + f'slug={test_provider_config.slug}, site_id={test_provider_config.site_id}) ' + f'has outdated SAML config (id={old_config.id}) which should be updated to ' + f'the current SAML config (id={new_config.id}).' ) - self.assertIn('CHECK SUMMARY:', output) - self.assertIn('Providers checked: 2', output) + self.assertIn(expected_warning, output) self.assertIn('Outdated: 1', output) + # Total includes: 1 outdated + 2 disabled configs (setUp + test's old_config which is also disabled) + self.assertIn('Total issues requiring attention: 3', output) - # Check key observability calls - expected_calls = [ - mock.call('saml_management_command.operation', 'run_checks'), - mock.call('saml_management_command.total_providers', 2), - mock.call('saml_management_command.outdated_count', 1), - mock.call('saml_management_command.site_mismatch_count', 0), - mock.call('saml_management_command.slug_mismatch_count', 1), - mock.call('saml_management_command.null_config_count', 1), - mock.call('saml_management_command.error_count', 0), - mock.call('saml_management_command.total_requiring_attention', 2), - ] - mock_set_custom_attribute.assert_has_calls(expected_calls, any_order=False) - - @mock.patch('common.djangoapps.third_party_auth.management.commands.saml.set_custom_attribute') - def test_run_checks_site_mismatches(self, mock_set_custom_attribute): + def test_run_checks_site_mismatches(self): """ Test the --run-checks command identifies site ID mismatches. 
""" @@ -380,7 +390,7 @@ def test_run_checks_site_mismatches(self, mock_set_custom_attribute): entity_id='https://example.com' ) - SAMLProviderConfigFactory.create( + provider = SAMLProviderConfigFactory.create( site=self.site, slug='test-provider', saml_configuration=config @@ -388,25 +398,17 @@ def test_run_checks_site_mismatches(self, mock_set_custom_attribute): output = self._run_checks_command() - self.assertIn('[WARNING]', output) - self.assertIn('test-provider', output) - self.assertIn('does not match the provider\'s site_id', output) - - # Check observability calls - expected_calls = [ - mock.call('saml_management_command.operation', 'run_checks'), - mock.call('saml_management_command.total_providers', 2), - mock.call('saml_management_command.outdated_count', 0), - mock.call('saml_management_command.site_mismatch_count', 1), - mock.call('saml_management_command.slug_mismatch_count', 1), - mock.call('saml_management_command.null_config_count', 1), - mock.call('saml_management_command.error_count', 0), - mock.call('saml_management_command.total_requiring_attention', 2), - ] - mock_set_custom_attribute.assert_has_calls(expected_calls, any_order=False) - - @mock.patch('common.djangoapps.third_party_auth.management.commands.saml.set_custom_attribute') - def test_run_checks_slug_mismatches(self, mock_set_custom_attribute): + expected_warning = ( + f'[WARNING] Provider (id={provider.id}, name={provider.name}, ' + f'slug={provider.slug}, site_id={provider.site_id}) ' + f'SAML config (id={config.id}, site_id={config.site_id}) does not match the provider\'s site_id.' + ) + self.assertIn(expected_warning, output) + self.assertIn('Site mismatches: 1', output) + # Total includes: 1 site mismatch + 1 disabled config (from setUp) + self.assertIn('Total issues requiring attention: 2', output) + + def test_run_checks_slug_mismatches(self): """ Test the --run-checks command identifies slug mismatches. """ @@ -416,7 +418,7 @@ def test_run_checks_slug_mismatches(self, mock_set_custom_attribute): entity_id='https://example.com' ) - SAMLProviderConfigFactory.create( + provider = SAMLProviderConfigFactory.create( site=self.site, slug='provider-slug', saml_configuration=config @@ -424,29 +426,23 @@ def test_run_checks_slug_mismatches(self, mock_set_custom_attribute): output = self._run_checks_command() - self.assertIn('[WARNING]', output) - self.assertIn('provider-slug', output) - self.assertIn('does not match the provider\'s slug', output) - - # Check observability calls - expected_calls = [ - mock.call('saml_management_command.operation', 'run_checks'), - mock.call('saml_management_command.total_providers', 2), - mock.call('saml_management_command.outdated_count', 0), - mock.call('saml_management_command.site_mismatch_count', 0), - mock.call('saml_management_command.slug_mismatch_count', 1), - mock.call('saml_management_command.null_config_count', 1), - mock.call('saml_management_command.error_count', 0), - mock.call('saml_management_command.total_requiring_attention', 1), - ] - mock_set_custom_attribute.assert_has_calls(expected_calls, any_order=False) - - @mock.patch('common.djangoapps.third_party_auth.management.commands.saml.set_custom_attribute') - def test_run_checks_null_configurations(self, mock_set_custom_attribute): + expected_info = ( + f'[INFO] Provider (id={provider.id}, name={provider.name}, ' + f'slug={provider.slug}, site_id={provider.site_id}) ' + f'has SAML config (id={config.id}, slug=\'{config.slug}\') ' + f'that does not match the provider\'s slug.' 
+ ) + self.assertIn(expected_info, output) + self.assertIn('Slug mismatches: 1', output) + + def test_run_checks_null_configurations(self): """ Test the --run-checks command identifies providers with null configurations. + This test verifies that providers with no direct SAML configuration and no + default configuration available are properly reported. """ - SAMLProviderConfigFactory.create( + # Create a provider with no SAML configuration on a site that has no default config + provider = SAMLProviderConfigFactory.create( site=self.site, slug='null-provider', saml_configuration=None @@ -454,19 +450,101 @@ def test_run_checks_null_configurations(self, mock_set_custom_attribute): output = self._run_checks_command() - self.assertIn('[INFO]', output) - self.assertIn('null-provider', output) - self.assertIn('has no SAML configuration because a matching default was not found', output) - - # Check observability calls - expected_calls = [ - mock.call('saml_management_command.operation', 'run_checks'), - mock.call('saml_management_command.total_providers', 2), - mock.call('saml_management_command.outdated_count', 0), - mock.call('saml_management_command.site_mismatch_count', 0), - mock.call('saml_management_command.slug_mismatch_count', 0), - mock.call('saml_management_command.null_config_count', 2), - mock.call('saml_management_command.error_count', 0), - mock.call('saml_management_command.total_requiring_attention', 0), - ] - mock_set_custom_attribute.assert_has_calls(expected_calls, any_order=False) + expected_warning = ( + f'[WARNING] Provider (id={provider.id}, name={provider.name}, ' + f'slug={provider.slug}, site_id={provider.site_id}) ' + f'has no direct SAML configuration and no matching default configuration was found.' + ) + self.assertIn(expected_warning, output) + self.assertIn('Missing configs: 1', output) # 1 from this test (provider with no config and no default) + self.assertIn('Disabled configs: 1', output) # 1 from setUp data + + def test_run_checks_null_config_id(self): + """ + Test the --run-checks command identifies providers with disabled default configurations. + When a provider has no direct SAML configuration and the default config is disabled, + it should be reported as a missing config issue. + """ + # Create a disabled default configuration for this site + disabled_default_config = SAMLConfigurationFactory.create( + site=self.site, + slug='default', + entity_id='https://default.example.com', + enabled=False + ) + + # Create a provider with no direct SAML configuration + # It will fall back to the disabled default config + provider = SAMLProviderConfigFactory.create( + site=self.site, + slug='null-id-provider', + saml_configuration=None + ) + + output = self._run_checks_command() + + expected_warning = ( + f'[WARNING] Provider (id={provider.id}, name={provider.name}, ' + f'slug={provider.slug}, site_id={provider.site_id}) ' + f'has no direct SAML configuration and the default configuration ' + f'(id={disabled_default_config.id}, enabled=False).' + ) + self.assertIn(expected_warning, output) + self.assertIn('Missing configs: 0', output) # No missing configs since default config exists + self.assertIn('Disabled configs: 2', output) # 1 from this test + 1 from setUp data + + def test_run_checks_with_default_config(self): + """ + Test the --run-checks command correctly handles providers with default configurations. 
+ """ + provider = SAMLProviderConfigFactory.create( + site=self.site, + slug='default-config-provider', + saml_configuration=None + ) + + default_config = SAMLConfigurationFactory.create( + site=self.site, + slug='default', + entity_id='https://default.example.com' + ) + + output = self._run_checks_command() + + self.assertIn('Missing configs: 0', output) # This test's provider has a valid default config + self.assertIn('Disabled configs: 1', output) # From setUp + + def test_run_checks_disabled_functionality(self): + """ + Test the --run-checks command handles disabled providers and configurations. + """ + disabled_provider = SAMLProviderConfigFactory.create( + site=self.site, + slug='disabled-provider', + enabled=False + ) + + disabled_config = SAMLConfigurationFactory.create( + site=self.site, + slug='disabled-config', + enabled=False + ) + + provider_with_disabled_config = SAMLProviderConfigFactory.create( + site=self.site, + slug='provider-with-disabled-config', + saml_configuration=disabled_config + ) + + output = self._run_checks_command() + + expected_warning = ( + f'[WARNING] Provider (id={provider_with_disabled_config.id}, ' + f'name={provider_with_disabled_config.name}, ' + f'slug={provider_with_disabled_config.slug}, ' + f'site_id={provider_with_disabled_config.site_id}) ' + f'has SAML config (id={disabled_config.id}, enabled=False).' + ) + self.assertIn(expected_warning, output) + self.assertIn('Missing configs: 1', output) # disabled_provider has no config + self.assertIn('Disabled configs: 2', output) # setUp's provider + provider_with_disabled_config diff --git a/common/djangoapps/third_party_auth/models.py b/common/djangoapps/third_party_auth/models.py index 6d244d96eddd..412875fd6cf0 100644 --- a/common/djangoapps/third_party_auth/models.py +++ b/common/djangoapps/third_party_auth/models.py @@ -810,6 +810,17 @@ def match_social_auth(self, social_auth): prefix = self.slug + ":" return self.backend_name == social_auth.provider and social_auth.uid.startswith(prefix) + def get_remote_id_from_field_name(self, social_auth, field_name): + """ Given a UserSocialAuth object, return the user's remote ID for the given field name. """ + if not self.match_social_auth(social_auth): + raise ValueError( + f"UserSocialAuth record does not match given provider {self.provider_id}" + ) + field_value = social_auth.extra_data.get(field_name, None) + if field_value and isinstance(field_value, list): + return field_value[0] + return field_value + def get_remote_id_from_social_auth(self, social_auth): """ Given a UserSocialAuth object, return the remote ID used by this provider.
""" assert self.match_social_auth(social_auth) diff --git a/common/djangoapps/third_party_auth/pipeline.py b/common/djangoapps/third_party_auth/pipeline.py index 496cfce93c1f..4b8804ca3802 100644 --- a/common/djangoapps/third_party_auth/pipeline.py +++ b/common/djangoapps/third_party_auth/pipeline.py @@ -1009,7 +1009,7 @@ def get_username(strategy, details, backend, user=None, *args, **kwargs): # lin else: slug_func = lambda val: val - if is_auto_generated_username_enabled(): + if is_auto_generated_username_enabled() and details.get('username') is None: username = get_auto_generated_username(details) else: if email_as_username and details.get('email'): diff --git a/lms/djangoapps/branding/tests/test_views.py b/lms/djangoapps/branding/tests/test_views.py index 36ebcd73509e..10c27192d11a 100644 --- a/lms/djangoapps/branding/tests/test_views.py +++ b/lms/djangoapps/branding/tests/test_views.py @@ -269,6 +269,20 @@ def test_index_does_not_redirect_without_site_override(self): response = self.client.get(reverse("root")) assert response.status_code == 200 + @override_settings(ENABLE_MKTG_SITE=True) + @override_settings(MKTG_URLS={'ROOT': 'https://foo.bar/'}) + @override_settings(LMS_ROOT_URL='https://foo.bar/') + def test_index_wont_redirect_to_marketing_root_if_it_matches_lms_root(self): + response = self.client.get(reverse("root")) + assert response.status_code == 200 + + @override_settings(ENABLE_MKTG_SITE=True) + @override_settings(MKTG_URLS={'ROOT': 'https://home.foo.bar/'}) + @override_settings(LMS_ROOT_URL='https://foo.bar/') + def test_index_will_redirect_to_new_root_if_mktg_site_is_enabled(self): + response = self.client.get(reverse("root")) + assert response.status_code == 302 + def test_index_redirects_to_marketing_site_with_site_override(self): """ Test index view redirects if MKTG_URLS['ROOT'] is set in SiteConfiguration """ self.use_site(self.site_other) diff --git a/lms/djangoapps/branding/views.py b/lms/djangoapps/branding/views.py index 711adb85afec..33c5813f16ff 100644 --- a/lms/djangoapps/branding/views.py +++ b/lms/djangoapps/branding/views.py @@ -42,7 +42,7 @@ def index(request): # page to make it easier to browse for courses (and register) if configuration_helpers.get_value( 'ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER', - settings.FEATURES.get('ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER', True)): + getattr(settings, 'ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER', True)): return redirect('dashboard') if use_catalog_mfe(): @@ -50,7 +50,7 @@ def index(request): enable_mktg_site = configuration_helpers.get_value( 'ENABLE_MKTG_SITE', - settings.FEATURES.get('ENABLE_MKTG_SITE', False) + getattr(settings, 'ENABLE_MKTG_SITE', False) ) if enable_mktg_site: @@ -58,7 +58,9 @@ def index(request): 'MKTG_URLS', settings.MKTG_URLS ) - return redirect(marketing_urls.get('ROOT')) + root_url = marketing_urls.get("ROOT") + if root_url != getattr(settings, "LMS_ROOT_URL", None): + return redirect(root_url) domain = request.headers.get('Host') diff --git a/lms/djangoapps/bulk_email/signals.py b/lms/djangoapps/bulk_email/signals.py index 7402dca75482..da18a459aeaa 100644 --- a/lms/djangoapps/bulk_email/signals.py +++ b/lms/djangoapps/bulk_email/signals.py @@ -7,6 +7,7 @@ from eventtracking import tracker from common.djangoapps.student.models import CourseEnrollment +from openedx.core.djangoapps.content.course_overviews.models import CourseOverview from openedx.core.djangoapps.user_api.accounts.signals import USER_RETIRE_MAILINGS from 
edx_ace.signals import ACE_MESSAGE_SENT @@ -27,7 +28,14 @@ def force_optout_all(sender, **kwargs): # lint-amnesty, pylint: disable=unused- raise TypeError('Expected a User type, but received None.') for enrollment in CourseEnrollment.objects.filter(user=user): - Optout.objects.get_or_create(user=user, course_id=enrollment.course.id) + try: + Optout.objects.get_or_create(user=user, course_id=enrollment.course.id) + except CourseOverview.DoesNotExist: + log.warning( + f"CourseOverview not found for enrollment {enrollment.id} (user: {user.id}), " + f"skipping optout creation. This may mean the course was deleted." + ) + continue @receiver(ACE_MESSAGE_SENT) diff --git a/lms/djangoapps/bulk_email/tests/test_signals.py b/lms/djangoapps/bulk_email/tests/test_signals.py index 1a3715284b12..01ad9312da4c 100644 --- a/lms/djangoapps/bulk_email/tests/test_signals.py +++ b/lms/djangoapps/bulk_email/tests/test_signals.py @@ -10,9 +10,11 @@ from django.core.management import call_command from django.urls import reverse +from common.djangoapps.student.models import CourseEnrollment from common.djangoapps.student.tests.factories import AdminFactory, CourseEnrollmentFactory, UserFactory from lms.djangoapps.bulk_email.models import BulkEmailFlag, Optout from lms.djangoapps.bulk_email.signals import force_optout_all +from opaque_keys.edx.keys import CourseKey from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase # lint-amnesty, pylint: disable=wrong-import-order from xmodule.modulestore.tests.factories import CourseFactory # lint-amnesty, pylint: disable=wrong-import-order @@ -85,3 +87,41 @@ def test_optout_course(self): assert len(mail.outbox) == 1 assert len(mail.outbox[0].to) == 1 assert mail.outbox[0].to[0] == self.instructor.email + + @patch('lms.djangoapps.bulk_email.signals.log.warning') + def test_optout_handles_missing_course_overview(self, mock_log_warning): + """ + Test that force_optout_all gracefully handles CourseEnrollments + with missing CourseOverview records + """ + # Create a course key for a course that doesn't exist in CourseOverview + nonexistent_course_key = CourseKey.from_string('course-v1:TestX+Missing+2023') + + # Create an enrollment with a course_id that doesn't have a CourseOverview + CourseEnrollment.objects.create( + user=self.student, + course_id=nonexistent_course_key, + mode='honor' + ) + + # Verify the orphaned enrollment exists + assert CourseEnrollment.objects.filter( + user=self.student, + course_id=nonexistent_course_key + ).exists() + + force_optout_all(sender=self.__class__, user=self.student) + + # Verify that a warning was logged for the missing CourseOverview + mock_log_warning.assert_called() + call_args = mock_log_warning.call_args[0][0] + assert "CourseOverview not found for enrollment" in call_args + assert f"user: {self.student.id}" in call_args + assert "skipping optout creation" in call_args + + # Verify that optouts were created for valid courses only + valid_course_optouts = Optout.objects.filter(user=self.student, course_id=self.course.id) + missing_course_optouts = Optout.objects.filter(user=self.student, course_id=nonexistent_course_key) + + assert valid_course_optouts.count() == 1 + assert missing_course_optouts.count() == 0 diff --git a/lms/djangoapps/course_home_api/outline/serializers.py b/lms/djangoapps/course_home_api/outline/serializers.py index cfa518138a95..f66012362327 100644 --- a/lms/djangoapps/course_home_api/outline/serializers.py +++ b/lms/djangoapps/course_home_api/outline/serializers.py @@ -62,6 +62,7 @@ def 
get_blocks(self, block): # pylint: disable=missing-function-docstring 'type': block_type, 'has_scheduled_content': block.get('has_scheduled_content'), 'hide_from_toc': block.get('hide_from_toc'), + 'is_preview': block.get('is_preview', False), }, } if 'special_exam_info' in self.context.get('extra_fields', []) and block.get('special_exam_info'): diff --git a/lms/djangoapps/course_home_api/outline/tests/test_view.py b/lms/djangoapps/course_home_api/outline/tests/test_view.py index 74e22e5fcc4b..6de5db83f94c 100644 --- a/lms/djangoapps/course_home_api/outline/tests/test_view.py +++ b/lms/djangoapps/course_home_api/outline/tests/test_view.py @@ -13,6 +13,7 @@ from django.test import override_settings from django.urls import reverse from edx_toggles.toggles.testutils import override_waffle_flag +from opaque_keys.edx.keys import UsageKey from cms.djangoapps.contentstore.outlines import update_outline_from_modulestore from common.djangoapps.course_modes.models import CourseMode @@ -43,6 +44,7 @@ BlockFactory, CourseFactory ) +from xmodule.partitions.partitions import ENROLLMENT_TRACK_PARTITION_ID @ddt.ddt @@ -484,6 +486,89 @@ def test_course_progress_analytics_disabled(self, mock_task): self.client.get(self.url) mock_task.assert_not_called() + # Tests for verified content preview functionality + # These tests cover the feature that allows audit learners to preview + # the structure of verified-only content without access to the content itself + + @patch('lms.djangoapps.course_home_api.outline.views.learner_can_preview_verified_content') + def test_verified_content_preview_disabled_integration(self, mock_preview_function): + """Test that when verified preview is disabled, no preview markers are added.""" + # Given a course with some Verified only sequences + with self.store.bulk_operations(self.course.id): + chapter = BlockFactory.create(category='chapter', parent_location=self.course.location) + sequential = BlockFactory.create( + category='sequential', + parent_location=chapter.location, + display_name='Verified Sequential', + group_access={ENROLLMENT_TRACK_PARTITION_ID: [2]} # restrict to verified only + ) + update_outline_from_modulestore(self.course.id) + + # ... where the preview feature is disabled + mock_preview_function.return_value = False + + # When I access them as an audit user + CourseEnrollment.enroll(self.user, self.course.id, CourseMode.AUDIT) + response = self.client.get(self.url) + + # Then I get a valid response back + assert response.status_code == 200 + + # ... with course_blocks populated + course_blocks = response.data['course_blocks']["blocks"] + + # ... but with verified content omitted + assert str(sequential.location) not in course_blocks + + # ... 
and no block has preview set to true + for block in course_blocks: + assert course_blocks[block].get('is_preview') is not True + + @patch('lms.djangoapps.course_home_api.outline.views.learner_can_preview_verified_content') + @patch('lms.djangoapps.course_home_api.outline.views.get_user_course_outline') + def test_verified_content_preview_enabled_marks_previewable_content(self, mock_outline, mock_preview_enabled): + """Test that when verified preview is enabled, previewable sequences and chapters are marked.""" + # Given a course with some Verified only sequences and some regular sequences + with self.store.bulk_operations(self.course.id): + chapter = BlockFactory.create(category='chapter', parent_location=self.course.location) + verified_sequential = BlockFactory.create( + category='sequential', + parent_location=chapter.location, + display_name='Verified Sequential', + ) + regular_sequential = BlockFactory.create( + category='sequential', + parent_location=chapter.location, + display_name='Regular Sequential' + ) + update_outline_from_modulestore(self.course.id) + + # ... with an outline that correctly identifies previewable sequences + mock_course_outline = Mock() + mock_course_outline.sections = {Mock(usage_key=chapter.location)} + mock_course_outline.sequences = {verified_sequential.location, regular_sequential.location} + mock_course_outline.previewable_sequences = {verified_sequential.location} + mock_outline.return_value = mock_course_outline + + # When I access them as an audit user with preview enabled + CourseEnrollment.enroll(self.user, self.course.id, CourseMode.AUDIT) + mock_preview_enabled.return_value = True + + # Then I get a valid response back + response = self.client.get(self.url) + assert response.status_code == 200 + + # ... with course_blocks populated + course_blocks = response.data['course_blocks']["blocks"] + + for block in course_blocks: + # ... and the verified only content is marked as preview only + if UsageKey.from_string(block) in mock_course_outline.previewable_sequences: + assert course_blocks[block].get('is_preview') is True + # ... 
and the regular content is not marked as preview + else: + assert course_blocks[block].get('is_preview') is False + @ddt.ddt class SidebarBlocksTestViews(BaseCourseHomeTests): diff --git a/lms/djangoapps/course_home_api/outline/views.py b/lms/djangoapps/course_home_api/outline/views.py index 7c5307cba764..78d5767ffeed 100644 --- a/lms/djangoapps/course_home_api/outline/views.py +++ b/lms/djangoapps/course_home_api/outline/views.py @@ -36,7 +36,10 @@ ) from lms.djangoapps.course_home_api.utils import get_course_or_403 from lms.djangoapps.course_home_api.tasks import collect_progress_for_user_in_course -from lms.djangoapps.course_home_api.toggles import send_course_progress_analytics_for_student_is_enabled +from lms.djangoapps.course_home_api.toggles import ( + learner_can_preview_verified_content, + send_course_progress_analytics_for_student_is_enabled, +) from lms.djangoapps.courseware.access import has_access from lms.djangoapps.courseware.context_processor import user_timezone_locale_prefs from lms.djangoapps.courseware.courses import get_course_date_blocks, get_course_info_section @@ -209,6 +212,7 @@ def get(self, request, *args, **kwargs): # pylint: disable=too-many-statements allow_anonymous = COURSE_ENABLE_UNENROLLED_ACCESS_FLAG.is_enabled(course_key) allow_public = allow_anonymous and course.course_visibility == COURSE_VISIBILITY_PUBLIC allow_public_outline = allow_anonymous and course.course_visibility == COURSE_VISIBILITY_PUBLIC_OUTLINE + allow_preview_of_verified_content = learner_can_preview_verified_content(course_key, request.user) # User locale settings user_timezone_locale = user_timezone_locale_prefs(request) @@ -309,7 +313,8 @@ def get(self, request, *args, **kwargs): # pylint: disable=too-many-statements # so this is a tiny first step in that migration. if course_blocks: user_course_outline = get_user_course_outline( - course_key, request.user, datetime.now(tz=timezone.utc) + course_key, request.user, datetime.now(tz=timezone.utc), + preview_verified_content=allow_preview_of_verified_content ) available_seq_ids = {str(usage_key) for usage_key in user_course_outline.sequences} @@ -339,6 +344,19 @@ def get(self, request, *args, **kwargs): # pylint: disable=too-many-statements ) ] if 'children' in chapter_data else [] + # For audit preview of verified content, we don't remove verified content. + # Instead, we mark it as preview so the frontend can handle it appropriately. + if allow_preview_of_verified_content: + previewable_sequences = {str(usage_key) for usage_key in user_course_outline.previewable_sequences} + + # Iterate through course_blocks to mark previewable sequences and chapters + for chapter_data in course_blocks['children']: + if chapter_data['id'] in previewable_sequences: + chapter_data['is_preview'] = True + for seq_data in chapter_data.get('children', []): + if seq_data['id'] in previewable_sequences: + seq_data['is_preview'] = True + user_has_passing_grade = False if not request.user.is_anonymous: user_grade = CourseGradeFactory().read(request.user, course) diff --git a/lms/djangoapps/course_home_api/progress/api.py b/lms/djangoapps/course_home_api/progress/api.py index b2a8634c59f4..f89ecd3d2596 100644 --- a/lms/djangoapps/course_home_api/progress/api.py +++ b/lms/djangoapps/course_home_api/progress/api.py @@ -2,14 +2,226 @@ Python APIs exposed for the progress tracking functionality of the course home API. 
""" +from __future__ import annotations + from django.contrib.auth import get_user_model from opaque_keys.edx.keys import CourseKey +from openedx.core.lib.grade_utils import round_away_from_zero +from xmodule.graders import ShowCorrectness +from datetime import datetime, timezone from lms.djangoapps.courseware.courses import get_course_blocks_completion_summary +from dataclasses import dataclass, field User = get_user_model() +@dataclass +class _AssignmentBucket: + """Holds scores and visibility info for one assignment type. + + Attributes: + assignment_type: Full assignment type name from the grading policy (for example, "Homework"). + num_total: The total number of assignments expected to contribute to the grade before any + drop-lowest rules are applied. + last_grade_publish_date: The most recent date when grades for all assignments of assignment_type + are released and included in the final grade. + scores: Per-subsection fractional scores (each value is ``earned / possible`` and falls in + the range 0–1). While awaiting published content we pad the list with zero placeholders + so that its length always matches ``num_total`` until real scores replace them. + visibilities: Mirrors ``scores`` index-for-index and records whether each subsection's + correctness feedback is visible to the learner (``True``), hidden (``False``), or not + yet populated (``None`` when the entry is a placeholder). + included: Tracks whether each subsection currently counts toward the learner's grade as + determined by ``SubsectionGrade.show_grades``. Values follow the same convention as + ``visibilities`` (``True`` / ``False`` / ``None`` placeholders). + assignments_created: Count of real subsections inserted into the bucket so far. Once this + reaches ``num_total``, all placeholder entries have been replaced with actual data. 
+ """ + assignment_type: str + num_total: int + last_grade_publish_date: datetime + scores: list[float] = field(default_factory=list) + visibilities: list[bool | None] = field(default_factory=list) + included: list[bool | None] = field(default_factory=list) + assignments_created: int = 0 + + @classmethod + def with_placeholders(cls, assignment_type: str, num_total: int, now: datetime): + """Create a bucket prefilled with placeholder (empty) entries.""" + return cls( + assignment_type=assignment_type, + num_total=num_total, + last_grade_publish_date=now, + scores=[0] * num_total, + visibilities=[None] * num_total, + included=[None] * num_total, + ) + + def add_subsection(self, score: float, is_visible: bool, is_included: bool): + """Add a subsection’s score and visibility, replacing a placeholder if space remains.""" + if self.assignments_created < self.num_total: + if self.scores: + self.scores.pop(0) + if self.visibilities: + self.visibilities.pop(0) + if self.included: + self.included.pop(0) + self.scores.append(score) + self.visibilities.append(is_visible) + self.included.append(is_included) + self.assignments_created += 1 + + def drop_lowest(self, num_droppable: int): + """Remove the lowest scoring subsections, up to the provided num_droppable.""" + while num_droppable > 0 and self.scores: + idx = self.scores.index(min(self.scores)) + self.scores.pop(idx) + self.visibilities.pop(idx) + self.included.pop(idx) + num_droppable -= 1 + + def hidden_state(self) -> str: + """Return whether kept scores are all, some, or none hidden.""" + if not self.visibilities: + return 'none' + all_hidden = all(v is False for v in self.visibilities) + some_hidden = any(v is False for v in self.visibilities) + if all_hidden: + return 'all' + if some_hidden: + return 'some' + return 'none' + + def averages(self) -> tuple[float, float]: + """Compute visible and included averages over kept scores. + + Visible average uses only grades with visibility flag True in numerator; denominator is total + number of kept scores (mirrors legacy behavior). Included average uses only scores that are + marked included (show_grades True) in numerator with same denominator. + + Returns: + (earned_visible, earned_all) tuple of floats (0-1 each). 
+ """ + if not self.scores: + return 0.0, 0.0 + visible_scores = [s for i, s in enumerate(self.scores) if self.visibilities[i]] + included_scores = [s for i, s in enumerate(self.scores) if self.included[i]] + earned_visible = (sum(visible_scores) / len(self.scores)) if self.scores else 0.0 + earned_all = (sum(included_scores) / len(self.scores)) if self.scores else 0.0 + return earned_visible, earned_all + + +class _AssignmentTypeGradeAggregator: + """Collects and aggregates subsection grades by assignment type.""" + + def __init__(self, course_grade, grading_policy: dict, has_staff_access: bool): + """Initialize with course grades, grading policy, and staff access flag.""" + self.course_grade = course_grade + self.grading_policy = grading_policy + self.has_staff_access = has_staff_access + self.now = datetime.now(timezone.utc) + self.policy_map = self._build_policy_map() + self.buckets: dict[str, _AssignmentBucket] = {} + + def _build_policy_map(self) -> dict: + """Convert grading policy into a lookup of assignment type → policy info.""" + policy_map = {} + for policy in self.grading_policy.get('GRADER', []): + policy_map[policy.get('type')] = { + 'weight': policy.get('weight', 0.0), + 'short_label': policy.get('short_label', ''), + 'num_droppable': policy.get('drop_count', 0), + 'num_total': policy.get('min_count', 0), + } + return policy_map + + def _bucket_for(self, assignment_type: str) -> _AssignmentBucket: + """Get or create a score bucket for the given assignment type.""" + bucket = self.buckets.get(assignment_type) + if bucket is None: + num_total = self.policy_map.get(assignment_type, {}).get('num_total', 0) or 0 + bucket = _AssignmentBucket.with_placeholders(assignment_type, num_total, self.now) + self.buckets[assignment_type] = bucket + return bucket + + def collect(self): + """Gather subsection grades into their respective assignment buckets.""" + for chapter in self.course_grade.chapter_grades.values(): + for subsection_grade in chapter.get('sections', []): + if not getattr(subsection_grade, 'graded', False): + continue + assignment_type = getattr(subsection_grade, 'format', '') or '' + if not assignment_type: + continue + graded_total = getattr(subsection_grade, 'graded_total', None) + earned = getattr(graded_total, 'earned', 0.0) if graded_total else 0.0 + possible = getattr(graded_total, 'possible', 0.0) if graded_total else 0.0 + earned = 0.0 if earned is None else earned + possible = 0.0 if possible is None else possible + score = (earned / possible) if possible else 0.0 + is_visible = ShowCorrectness.correctness_available( + subsection_grade.show_correctness, subsection_grade.due, self.has_staff_access + ) + is_included = subsection_grade.show_grades(self.has_staff_access) + bucket = self._bucket_for(assignment_type) + bucket.add_subsection(score, is_visible, is_included) + visibilities_with_due_dates = [ShowCorrectness.PAST_DUE, ShowCorrectness.NEVER_BUT_INCLUDE_GRADE] + if subsection_grade.show_correctness in visibilities_with_due_dates: + if subsection_grade.due and subsection_grade.due > bucket.last_grade_publish_date: + bucket.last_grade_publish_date = subsection_grade.due + + def build_results(self) -> dict: + """Apply drops, compute averages, and return aggregated results and total grade.""" + final_grades = 0.0 + rows = [] + for assignment_type, bucket in self.buckets.items(): + policy = self.policy_map.get(assignment_type, {}) + bucket.drop_lowest(policy.get('num_droppable', 0)) + earned_visible, earned_all = bucket.averages() + weight = policy.get('weight', 
0.0) + short_label = policy.get('short_label', '') + row = { + 'type': assignment_type, + 'weight': weight, + 'average_grade': round_away_from_zero(earned_visible, 4), + 'weighted_grade': round_away_from_zero(earned_visible * weight, 4), + 'short_label': short_label, + 'num_droppable': policy.get('num_droppable', 0), + 'last_grade_publish_date': bucket.last_grade_publish_date, + 'has_hidden_contribution': bucket.hidden_state(), + } + final_grades += earned_all * weight + rows.append(row) + rows.sort(key=lambda r: r['weight']) + return {'results': rows, 'final_grades': round_away_from_zero(final_grades, 4)} + + def run(self) -> dict: + """Execute full pipeline (collect + aggregate) returning final payload.""" + self.collect() + return self.build_results() + + +def aggregate_assignment_type_grade_summary( + course_grade, + grading_policy: dict, + has_staff_access: bool = False, +) -> dict: + """ + Aggregate subsection grades by assignment type and return summary data. + Args: + course_grade: CourseGrade object containing chapter and subsection grades. + grading_policy: Dictionary representing the course's grading policy. + has_staff_access: Boolean indicating if the user has staff access to view all grades. + Returns: + Dictionary with keys: + results: list of per-assignment-type summary dicts + final_grades: overall weighted contribution (float, 4 decimal rounding) + """ + aggregator = _AssignmentTypeGradeAggregator(course_grade, grading_policy, has_staff_access) + return aggregator.run() + + def calculate_progress_for_learner_in_course(course_key: CourseKey, user: User) -> dict: """ Calculate a given learner's progress in the specified course run. diff --git a/lms/djangoapps/course_home_api/progress/serializers.py b/lms/djangoapps/course_home_api/progress/serializers.py index 6bdc204434af..c48660a41c6a 100644 --- a/lms/djangoapps/course_home_api/progress/serializers.py +++ b/lms/djangoapps/course_home_api/progress/serializers.py @@ -26,6 +26,7 @@ class SubsectionScoresSerializer(ReadOnlySerializer): assignment_type = serializers.CharField(source='format') block_key = serializers.SerializerMethodField() display_name = serializers.CharField() + due = serializers.DateTimeField(allow_null=True) has_graded_assignment = serializers.BooleanField(source='graded') override = serializers.SerializerMethodField() learner_has_access = serializers.SerializerMethodField() @@ -127,6 +128,20 @@ class VerificationDataSerializer(ReadOnlySerializer): status_date = serializers.DateTimeField() +class AssignmentTypeScoresSerializer(ReadOnlySerializer): + """ + Serializer for aggregated scores per assignment type. 
+ """ + type = serializers.CharField() + weight = serializers.FloatField() + average_grade = serializers.FloatField() + weighted_grade = serializers.FloatField() + last_grade_publish_date = serializers.DateTimeField() + has_hidden_contribution = serializers.CharField() + short_label = serializers.CharField() + num_droppable = serializers.IntegerField() + + class ProgressTabSerializer(VerifiedModeSerializer): """ Serializer for progress tab @@ -146,3 +161,5 @@ class ProgressTabSerializer(VerifiedModeSerializer): user_has_passing_grade = serializers.BooleanField() verification_data = VerificationDataSerializer() disable_progress_graph = serializers.BooleanField() + assignment_type_grade_summary = AssignmentTypeScoresSerializer(many=True) + final_grades = serializers.FloatField() diff --git a/lms/djangoapps/course_home_api/progress/tests/test_api.py b/lms/djangoapps/course_home_api/progress/tests/test_api.py index 30d8d9059eaa..51e7dd68286e 100644 --- a/lms/djangoapps/course_home_api/progress/tests/test_api.py +++ b/lms/djangoapps/course_home_api/progress/tests/test_api.py @@ -6,7 +6,80 @@ from django.test import TestCase -from lms.djangoapps.course_home_api.progress.api import calculate_progress_for_learner_in_course +from lms.djangoapps.course_home_api.progress.api import ( + calculate_progress_for_learner_in_course, + aggregate_assignment_type_grade_summary, +) +from xmodule.graders import ShowCorrectness +from datetime import datetime, timedelta, timezone +from types import SimpleNamespace + + +def _make_subsection(fmt, earned, possible, show_corr, *, due_delta_days=None): + """Build a lightweight subsection object for testing aggregation scenarios.""" + graded_total = SimpleNamespace(earned=earned, possible=possible) + due = None + if due_delta_days is not None: + due = datetime.now(timezone.utc) + timedelta(days=due_delta_days) + return SimpleNamespace( + graded=True, + format=fmt, + graded_total=graded_total, + show_correctness=show_corr, + due=due, + show_grades=lambda staff: True, + ) + + +_AGGREGATION_SCENARIOS = [ + ( + 'all_visible_always', + {'type': 'Homework', 'weight': 1.0, 'drop_count': 0, 'min_count': 2, 'short_label': 'HW'}, + [ + _make_subsection('Homework', 1, 1, ShowCorrectness.ALWAYS), + _make_subsection('Homework', 0.5, 1, ShowCorrectness.ALWAYS), + ], + {'avg': 0.75, 'weighted': 0.75, 'hidden': 'none', 'final': 0.75}, + ), + ( + 'some_hidden_never_but_include', + {'type': 'Exam', 'weight': 1.0, 'drop_count': 0, 'min_count': 2, 'short_label': 'EX'}, + [ + _make_subsection('Exam', 1, 1, ShowCorrectness.ALWAYS), + _make_subsection('Exam', 0.5, 1, ShowCorrectness.NEVER_BUT_INCLUDE_GRADE), + ], + {'avg': 0.5, 'weighted': 0.5, 'hidden': 'some', 'final': 0.75}, + ), + ( + 'all_hidden_never_but_include', + {'type': 'Quiz', 'weight': 1.0, 'drop_count': 0, 'min_count': 2, 'short_label': 'QZ'}, + [ + _make_subsection('Quiz', 0.4, 1, ShowCorrectness.NEVER_BUT_INCLUDE_GRADE), + _make_subsection('Quiz', 0.6, 1, ShowCorrectness.NEVER_BUT_INCLUDE_GRADE), + ], + {'avg': 0.0, 'weighted': 0.0, 'hidden': 'all', 'final': 0.5}, + ), + ( + 'past_due_mixed_visibility', + {'type': 'Lab', 'weight': 1.0, 'drop_count': 0, 'min_count': 2, 'short_label': 'LB'}, + [ + _make_subsection('Lab', 0.8, 1, ShowCorrectness.PAST_DUE, due_delta_days=-1), + _make_subsection('Lab', 0.2, 1, ShowCorrectness.PAST_DUE, due_delta_days=+3), + ], + {'avg': 0.4, 'weighted': 0.4, 'hidden': 'some', 'final': 0.5}, + ), + ( + 'drop_lowest_keeps_high_scores', + {'type': 'Project', 'weight': 1.0, 'drop_count': 2, 
'min_count': 4, 'short_label': 'PR'}, + [ + _make_subsection('Project', 1, 1, ShowCorrectness.ALWAYS), + _make_subsection('Project', 1, 1, ShowCorrectness.ALWAYS), + _make_subsection('Project', 0, 1, ShowCorrectness.ALWAYS), + _make_subsection('Project', 0, 1, ShowCorrectness.ALWAYS), + ], + {'avg': 1.0, 'weighted': 1.0, 'hidden': 'none', 'final': 1.0}, + ), +] class ProgressApiTests(TestCase): @@ -73,3 +146,37 @@ def test_calculate_progress_for_learner_in_course_summary_empty(self, mock_get_s results = calculate_progress_for_learner_in_course("some_course", "some_user") assert not results + + def test_aggregate_assignment_type_grade_summary_scenarios(self): + """ + A test to verify the functionality of aggregate_assignment_type_grade_summary. + 1. Test visibility modes (always, never but include grade, past due) + 2. Test drop-lowest behavior + 3. Test weighting behavior + 4. Test final grade calculation + 5. Test average grade calculation + 6. Test weighted grade calculation + 7. Test has_hidden_contribution calculation + """ + + for case_name, policy, subsections, expected in _AGGREGATION_SCENARIOS: + with self.subTest(case_name=case_name): + course_grade = SimpleNamespace(chapter_grades={'chapter': {'sections': subsections}}) + grading_policy = {'GRADER': [policy]} + + result = aggregate_assignment_type_grade_summary( + course_grade, + grading_policy, + has_staff_access=False, + ) + + assert 'results' in result and 'final_grades' in result + assert result['final_grades'] == expected['final'] + assert len(result['results']) == 1 + + row = result['results'][0] + assert row['type'] == policy['type'], case_name + assert row['average_grade'] == expected['avg'] + assert row['weighted_grade'] == expected['weighted'] + assert row['has_hidden_contribution'] == expected['hidden'] + assert row['num_droppable'] == policy['drop_count'] diff --git a/lms/djangoapps/course_home_api/progress/tests/test_views.py b/lms/djangoapps/course_home_api/progress/tests/test_views.py index d13ebec29c21..8012e11675f1 100644 --- a/lms/djangoapps/course_home_api/progress/tests/test_views.py +++ b/lms/djangoapps/course_home_api/progress/tests/test_views.py @@ -282,8 +282,8 @@ def test_url_hidden_if_subsection_hide_after_due(self): assert hide_after_due_subsection['url'] is None @ddt.data( - (True, 0.7), # midterm and final are visible to staff - (False, 0.3), # just the midterm is visible to learners + (True, 0.72), # lab, midterm and final are visible to staff + (False, 0.32), # only the lab and midterm are visible to learners ) @ddt.unpack def test_course_grade_considers_subsection_grade_visibility(self, is_staff, expected_percent): @@ -301,14 +301,18 @@ def test_course_grade_considers_subsection_grade_visibility(self, is_staff, expe never = self.add_subsection_with_problem(format='Homework', show_correctness='never') always = self.add_subsection_with_problem(format='Midterm Exam', show_correctness='always') past_due = self.add_subsection_with_problem(format='Final Exam', show_correctness='past_due', due=tomorrow) + never_but_show_grade = self.add_subsection_with_problem( + format='Lab', show_correctness='never_but_include_grade' + ) answer_problem(self.course, get_mock_request(self.user), never) answer_problem(self.course, get_mock_request(self.user), always) answer_problem(self.course, get_mock_request(self.user), past_due) + answer_problem(self.course, get_mock_request(self.user), never_but_show_grade) # First, confirm the grade in the database - it should never change based on user state.
# This is midterm and final and a single problem added together. - assert CourseGradeFactory().read(self.user, self.course).percent == 0.72 + assert CourseGradeFactory().read(self.user, self.course).percent == 0.73 response = self.client.get(self.url) assert response.status_code == 200 diff --git a/lms/djangoapps/course_home_api/progress/views.py b/lms/djangoapps/course_home_api/progress/views.py index 3783c19061dc..54e71df48cc5 100644 --- a/lms/djangoapps/course_home_api/progress/views.py +++ b/lms/djangoapps/course_home_api/progress/views.py @@ -13,8 +13,11 @@ from rest_framework.response import Response from xmodule.modulestore.django import modulestore +from xmodule.graders import ShowCorrectness from common.djangoapps.student.models import CourseEnrollment from lms.djangoapps.course_home_api.progress.serializers import ProgressTabSerializer +from lms.djangoapps.course_home_api.progress.api import aggregate_assignment_type_grade_summary + from lms.djangoapps.course_home_api.toggles import course_home_mfe_progress_tab_is_active from lms.djangoapps.courseware.access import has_access, has_ccx_coach_role from lms.djangoapps.course_blocks.api import get_course_blocks @@ -99,6 +102,7 @@ class ProgressTabView(RetrieveAPIView): assignment_type: (str) the format, if any, of the Subsection (Homework, Exam, etc) block_key: (str) the key of the given subsection block display_name: (str) a str of what the name of the Subsection is for displaying on the site + due: (str or None) the due date of the subsection in ISO 8601 format, or None if no due date is set has_graded_assignment: (bool) whether or not the Subsection is a graded assignment learner_has_access: (bool) whether the learner has access to the subsection (could be FBE gated) num_points_earned: (int) the amount of points the user has earned for the given subsection @@ -175,6 +179,18 @@ def _get_student_user(self, request, course_key, student_id, is_staff): except User.DoesNotExist as exc: raise Http404 from exc + def _visible_section_scores(self, course_grade): + """Return only those chapter/section scores that are visible to the learner.""" + visible_chapters = [] + for chapter in course_grade.chapter_grades.values(): + filtered_sections = [ + subsection + for subsection in chapter["sections"] + if getattr(subsection, "show_correctness", None) != ShowCorrectness.NEVER_BUT_INCLUDE_GRADE + ] + visible_chapters.append({**chapter, "sections": filtered_sections}) + return visible_chapters + def get(self, request, *args, **kwargs): course_key_string = kwargs.get('course_key_string') course_key = CourseKey.from_string(course_key_string) @@ -245,6 +261,16 @@ def get(self, request, *args, **kwargs): access_expiration = get_access_expiration_data(request.user, course_overview) + # Aggregations delegated to helper functions for reuse and testability + assignment_type_grade_summary = aggregate_assignment_type_grade_summary( + course_grade, + grading_policy, + has_staff_access=is_staff, + ) + + # Filter out section scores to only have those that are visible to the user + section_scores = self._visible_section_scores(course_grade) + data = { 'access_expiration': access_expiration, 'certificate_data': get_cert_data(student, course, enrollment_mode, course_grade), @@ -255,12 +281,14 @@ def get(self, request, *args, **kwargs): 'enrollment_mode': enrollment_mode, 'grading_policy': grading_policy, 'has_scheduled_content': has_scheduled_content, - 'section_scores': list(course_grade.chapter_grades.values()), + 'section_scores': section_scores, 'studio_url': 
get_studio_url(course, 'settings/grading'), 'username': username, 'user_has_passing_grade': user_has_passing_grade, 'verification_data': verification_data, 'disable_progress_graph': disable_progress_graph, + 'assignment_type_grade_summary': assignment_type_grade_summary["results"], + 'final_grades': assignment_type_grade_summary["final_grades"], } context = self.get_serializer_context() context['staff_access'] = is_staff diff --git a/lms/djangoapps/course_home_api/tests/test_toggles.py b/lms/djangoapps/course_home_api/tests/test_toggles.py new file mode 100644 index 000000000000..46ab545d0ade --- /dev/null +++ b/lms/djangoapps/course_home_api/tests/test_toggles.py @@ -0,0 +1,155 @@ +""" +Tests for Course Home API toggles. +""" + +from unittest.mock import Mock, patch + +from django.test import TestCase +from opaque_keys.edx.keys import CourseKey + +from common.djangoapps.course_modes.models import CourseMode +from common.djangoapps.course_modes.tests.factories import CourseModeFactory + +from ..toggles import learner_can_preview_verified_content + + +class TestLearnerCanPreviewVerifiedContent(TestCase): + """Test cases for learner_can_preview_verified_content function.""" + + def setUp(self): + """Set up test fixtures.""" + self.course_key = CourseKey.from_string("course-v1:TestX+CS101+2024") + self.user = Mock() + + # Set up patchers + self.feature_enabled_patcher = patch( + "lms.djangoapps.course_home_api.toggles.audit_learner_verified_preview_is_enabled" + ) + self.verified_mode_for_course_patcher = patch( + "common.djangoapps.course_modes.models.CourseMode.verified_mode_for_course" + ) + self.get_enrollment_patcher = patch( + "common.djangoapps.student.models.CourseEnrollment.get_enrollment" + ) + + # Course set up with verified, professional, and audit modes + self.verified_mode = CourseModeFactory( + course_id=self.course_key, + mode_slug=CourseMode.VERIFIED, + mode_display_name="Verified", + ) + self.professional_mode = CourseModeFactory( + course_id=self.course_key, + mode_slug=CourseMode.PROFESSIONAL, + mode_display_name="Professional", + ) + self.audit_mode = CourseModeFactory( + course_id=self.course_key, + mode_slug=CourseMode.AUDIT, + mode_display_name="Audit", + ) + self.course_modes_dict = { + "audit": self.audit_mode, + "verified": self.verified_mode, + "professional": self.professional_mode, + } + + # Start patchers + self.mock_feature_enabled = self.feature_enabled_patcher.start() + self.mock_verified_mode_for_course = ( + self.verified_mode_for_course_patcher.start() + ) + self.mock_get_enrollment = self.get_enrollment_patcher.start() + + def _enroll_user(self, mode): + """Helper method to set up user enrollment mock.""" + mock_enrollment = Mock() + mock_enrollment.mode = mode + self.mock_get_enrollment.return_value = mock_enrollment + + def tearDown(self): + """Clean up patchers.""" + self.feature_enabled_patcher.stop() + self.verified_mode_for_course_patcher.stop() + self.get_enrollment_patcher.stop() + + def test_all_conditions_met_returns_true(self): + """Test that function returns True when all conditions are met.""" + # Given the feature is enabled, course has verified mode, and user is enrolled as audit + self.mock_feature_enabled.return_value = True + self.mock_verified_mode_for_course.return_value = self.course_modes_dict[ + "professional" + ] + self._enroll_user(CourseMode.AUDIT) + + # When I check if the learner can preview verified content + result = learner_can_preview_verified_content(self.course_key, self.user) + + # Then the result should be True + 
self.assertTrue(result) + + def test_feature_disabled_returns_false(self): + """Test that function returns False when feature is disabled.""" + # Given the feature is disabled + self.mock_feature_enabled.return_value = False + + # ... even if all other conditions are met + self.mock_verified_mode_for_course.return_value = self.course_modes_dict[ + "professional" + ] + self._enroll_user(CourseMode.AUDIT) + + # When I check if the learner can preview verified content + result = learner_can_preview_verified_content(self.course_key, self.user) + + # Then the result should be False + self.assertFalse(result) + + def test_no_verified_mode_returns_false(self): + """Test that function returns False when course has no verified mode.""" + # Given the course does not have a verified mode + self.mock_verified_mode_for_course.return_value = None + + # ... even if all other conditions are met + self.mock_feature_enabled.return_value = True + self._enroll_user(CourseMode.AUDIT) + + # When I check if the learner can preview verified content + result = learner_can_preview_verified_content(self.course_key, self.user) + + # Then the result should be False + self.assertFalse(result) + + def test_no_enrollment_returns_false(self): + """Test that function returns False when user is not enrolled.""" + # Given the user is unenrolled + self.mock_get_enrollment.return_value = None + + # ... even if all other conditions are met + self.mock_feature_enabled.return_value = True + self.mock_verified_mode_for_course.return_value = self.course_modes_dict[ + "professional" + ] + + # When I check if the learner can preview verified content + result = learner_can_preview_verified_content(self.course_key, self.user) + + # Then the result should be False + self.assertFalse(result) + + def test_verified_enrollment_returns_false(self): + """Test that function returns False when user is enrolled in verified mode.""" + # Given the user is not enrolled as audit + self._enroll_user(CourseMode.VERIFIED) + + # ... even if all other conditions are met + self.mock_feature_enabled.return_value = True + self.mock_verified_mode_for_course.return_value = self.course_modes_dict[ + "professional" + ] + + # When I check if the learner can preview verified content + result = learner_can_preview_verified_content(self.course_key, self.user) + + # Then the result should be False + self.assertFalse(result) diff --git a/lms/djangoapps/course_home_api/toggles.py b/lms/djangoapps/course_home_api/toggles.py index 052862796c75..1f2d32b87e96 100644 --- a/lms/djangoapps/course_home_api/toggles.py +++ b/lms/djangoapps/course_home_api/toggles.py @@ -3,6 +3,9 @@ """ from openedx.core.djangoapps.waffle_utils import CourseWaffleFlag +from openedx.core.lib.cache_utils import request_cached +from common.djangoapps.course_modes.models import CourseMode +from common.djangoapps.student.models import CourseEnrollment WAFFLE_FLAG_NAMESPACE = 'course_home' @@ -51,6 +54,21 @@ ) +# Waffle flag to enable audit learner preview of course structure visible to verified learners. +# +# .. toggle_name: course_home.audit_learner_verified_preview +# .. toggle_implementation: CourseWaffleFlag +# .. toggle_default: False +# .. toggle_description: Where enabled, audit learners can see the presence of the sections / units +# otherwise restricted to verified learners. The content itself remains inaccessible. +# .. toggle_use_cases: open_edx +# .. toggle_creation_date: 2025-11-07 +# .. 
toggle_target_removal_date: None +COURSE_HOME_AUDIT_LEARNER_VERIFIED_PREVIEW = CourseWaffleFlag( + f'{WAFFLE_FLAG_NAMESPACE}.audit_learner_verified_preview', __name__ +) + + def course_home_mfe_progress_tab_is_active(course_key): # Avoiding a circular dependency from .models import DisableProgressPageStackedConfig @@ -73,3 +91,40 @@ def send_course_progress_analytics_for_student_is_enabled(course_key): Returns True if the course completion analytics feature is enabled for a given course. """ return COURSE_HOME_SEND_COURSE_PROGRESS_ANALYTICS_FOR_STUDENT.is_enabled(course_key) + + +def audit_learner_verified_preview_is_enabled(course_key): + """ + Returns True if the audit learner verified preview feature is enabled for a given course. + """ + return COURSE_HOME_AUDIT_LEARNER_VERIFIED_PREVIEW.is_enabled(course_key) + + +@request_cached() +def learner_can_preview_verified_content(course_key, user): + """ + Determine if an audit learner can preview verified content in a course. + + Args: + course_key: The course identifier. + user: The user object + Returns: + True if the learner can preview verified content, False otherwise. + """ + # To preview verified content, the feature must be enabled for the course... + feature_enabled = audit_learner_verified_preview_is_enabled(course_key) + if not feature_enabled: + return False + + # ... the course must have a verified mode + course_has_verified_mode = CourseMode.verified_mode_for_course(course_key) + if not course_has_verified_mode: + return False + + # ... and the user must be enrolled as audit + enrollment = CourseEnrollment.get_enrollment(user, course_key) + user_enrolled_as_audit = enrollment is not None and enrollment.mode == CourseMode.AUDIT + if not user_enrolled_as_audit: + return False + + return True diff --git a/lms/djangoapps/courseware/tests/test_views.py b/lms/djangoapps/courseware/tests/test_views.py index 4e3d1be9bddc..2c3ece3133a5 100644 --- a/lms/djangoapps/courseware/tests/test_views.py +++ b/lms/djangoapps/courseware/tests/test_views.py @@ -1781,6 +1781,14 @@ def assert_progress_page_show_grades(self, response, show_correctness, due_date, (ShowCorrectness.PAST_DUE, TODAY, True), (ShowCorrectness.PAST_DUE, TOMORROW, False), (ShowCorrectness.PAST_DUE, TOMORROW, True), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, None, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, None, True), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, YESTERDAY, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, YESTERDAY, True), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, TODAY, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, TODAY, True), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, TOMORROW, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, TOMORROW, True), ) @ddt.unpack def test_progress_page_no_problem_scores(self, show_correctness, due_date_name, graded): @@ -1821,6 +1829,14 @@ def test_progress_page_no_problem_scores(self, show_correctness, due_date_name, (ShowCorrectness.PAST_DUE, TODAY, True, True), (ShowCorrectness.PAST_DUE, TOMORROW, False, False), (ShowCorrectness.PAST_DUE, TOMORROW, True, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, None, False, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, None, True, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, YESTERDAY, False, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, YESTERDAY, True, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, TODAY, False, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, TODAY, True, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, 
TOMORROW, False, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, TOMORROW, True, False), ) @ddt.unpack def test_progress_page_hide_scores_from_learner(self, show_correctness, due_date_name, graded, show_grades): @@ -1873,11 +1889,20 @@ def test_progress_page_hide_scores_from_learner(self, show_correctness, due_date (ShowCorrectness.PAST_DUE, TODAY, True, True), (ShowCorrectness.PAST_DUE, TOMORROW, False, True), (ShowCorrectness.PAST_DUE, TOMORROW, True, True), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, None, False, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, None, True, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, YESTERDAY, False, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, YESTERDAY, True, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, TODAY, False, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, TODAY, True, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, TOMORROW, False, False), + (ShowCorrectness.NEVER_BUT_INCLUDE_GRADE, TOMORROW, True, False), ) @ddt.unpack def test_progress_page_hide_scores_from_staff(self, show_correctness, due_date_name, graded, show_grades): """ - Test that problem scores are hidden from staff viewing a learner's progress page only if show_correctness=never. + Test that problem scores are hidden from staff viewing a learner's progress page only if show_correctness is + never or never_but_include_grade. """ due_date = self.DATES[due_date_name] self.setup_course(show_correctness=show_correctness, due_date=due_date, graded=graded) diff --git a/lms/djangoapps/courseware/toggles.py b/lms/djangoapps/courseware/toggles.py index f9f083cad42e..3ce78c554dd0 100644 --- a/lms/djangoapps/courseware/toggles.py +++ b/lms/djangoapps/courseware/toggles.py @@ -2,7 +2,7 @@ Toggles for courseware in-course experience. """ -from edx_toggles.toggles import SettingToggle, WaffleSwitch +from edx_toggles.toggles import SettingToggle, WaffleFlag, WaffleSwitch from openedx.core.djangoapps.waffle_utils import CourseWaffleFlag @@ -168,6 +168,19 @@ f'{WAFFLE_FLAG_NAMESPACE}.discovery_default_language_filter', __name__ ) +# .. toggle_name: courseware.unify_site_and_translation_language +# .. toggle_implementation: WaffleFlag +# .. toggle_default: False +# .. toggle_description: Update LMS to use site language for xpert unit translations and enable new header site language switcher. +# .. toggle_use_cases: opt_in +# .. toggle_creation_date: 2026-01-08 +# .. toggle_target_removal_date: None +# .. toggle_warning: None. +# .. 
toggle_tickets: https://github.com/edx/edx-platform/pull/81 +ENABLE_UNIFIED_SITE_AND_TRANSLATION_LANGUAGE = WaffleFlag( + f'{WAFFLE_FLAG_NAMESPACE}.unify_site_and_translation_language', __name__ +) + def course_exit_page_is_active(course_key): return COURSEWARE_MICROFRONTEND_COURSE_EXIT_PAGE.is_enabled(course_key) diff --git a/lms/djangoapps/discussion/rest_api/serializers.py b/lms/djangoapps/discussion/rest_api/serializers.py index 9c2668d0b226..8a7ab16e0903 100644 --- a/lms/djangoapps/discussion/rest_api/serializers.py +++ b/lms/djangoapps/discussion/rest_api/serializers.py @@ -37,6 +37,7 @@ get_course_staff_users_list, get_moderator_users_list, get_course_ta_users_list, + get_user_learner_status, ) from openedx.core.djangoapps.discussions.models import DiscussionTopicLink from openedx.core.djangoapps.discussions.utils import get_group_names_by_id @@ -182,6 +183,7 @@ class _ContentSerializer(serializers.Serializer): id = serializers.CharField(read_only=True) # pylint: disable=invalid-name author = serializers.SerializerMethodField() author_label = serializers.SerializerMethodField() + learner_status = serializers.SerializerMethodField() created_at = serializers.CharField(read_only=True) updated_at = serializers.CharField(read_only=True) raw_body = serializers.CharField(source="body", validators=[validate_not_blank]) @@ -275,6 +277,26 @@ def get_author_label(self, obj): user_id = int(obj["user_id"]) return self._get_user_label(user_id) + def get_learner_status(self, obj): + """ + Get the learner status for the discussion post author. + Returns one of: "anonymous", "staff", "new", "regular" + """ + # Skip for anonymous content + if self._is_anonymous(obj) or obj.get("user_id") is None: + return "anonymous" + + try: + user = User.objects.get(id=int(obj["user_id"])) + except (User.DoesNotExist, ValueError): + return "anonymous" + + course = self.context.get("course") + if not course: + return "anonymous" + + return get_user_learner_status(user, course.id) + def get_rendered_body(self, obj): """ Returns the rendered body content. 
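# Illustrative sketch (not part of this change set): a minimal consumer of the
# new ``learner_status`` field added to _ContentSerializer above. Only the four
# status values ("anonymous", "staff", "new", "regular") come from the change
# itself; the badge mapping and the ``badge_for_learner_status`` helper are
# hypothetical names invented for this example.

def badge_for_learner_status(learner_status: str) -> str | None:
    """Map a learner_status value to a hypothetical UI badge label."""
    badges = {
        "staff": "Staff",          # privileged forum/course roles
        "new": "New contributor",  # enrolled, but no meaningful engagement yet
        "regular": None,           # engaged learners get no special badge
        "anonymous": None,         # anonymous content exposes no author status
    }
    return badges.get(learner_status)


assert badge_for_learner_status("new") == "New contributor"
assert badge_for_learner_status("regular") is None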
diff --git a/lms/djangoapps/discussion/rest_api/tests/test_api_v2.py b/lms/djangoapps/discussion/rest_api/tests/test_api_v2.py index f5b15b905639..53c12454aec9 100644 --- a/lms/djangoapps/discussion/rest_api/tests/test_api_v2.py +++ b/lms/djangoapps/discussion/rest_api/tests/test_api_v2.py @@ -363,6 +363,7 @@ def test_basic_in_blackout_period_with_user_access(self, mock_emit): "course_id": str(self.course.id), "comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_id", "read": True, + "learner_status": "staff", "editable_fields": [ "abuse_flagged", "anonymous", @@ -689,6 +690,7 @@ def test_success(self, parent_id, mock_emit): "parent_id": parent_id, "author": self.user.username, "author_label": None, + "learner_status": "new", "created_at": "2015-05-27T00:00:00Z", "updated_at": "2015-05-27T00:00:00Z", "raw_body": "Test body", @@ -796,6 +798,7 @@ def test_success_in_black_out_with_user_access(self, parent_id, mock_emit): "parent_id": parent_id, "author": self.user.username, "author_label": "Moderator", + "learner_status": "staff", "created_at": "2015-05-27T00:00:00Z", "updated_at": "2015-05-27T00:00:00Z", "raw_body": "Test body", @@ -1799,6 +1802,7 @@ def test_basic(self, parent_id): "parent_id": parent_id, "author": self.user.username, "author_label": None, + "learner_status": "new", "created_at": "2015-06-03T00:00:00Z", "updated_at": "2015-06-03T00:00:00Z", "raw_body": "Edited body", @@ -3737,6 +3741,7 @@ def get_source_and_expected_comments(self): "parent_id": None, "author": self.author.username, "author_label": None, + "learner_status": "new", "created_at": "2015-05-11T00:00:00Z", "updated_at": "2015-05-11T11:11:11Z", "raw_body": "Test body", @@ -3771,6 +3776,7 @@ def get_source_and_expected_comments(self): "parent_id": None, "author": None, "author_label": None, + "learner_status": "anonymous", "created_at": "2015-05-11T22:22:22Z", "updated_at": "2015-05-11T33:33:33Z", "raw_body": "More content", diff --git a/lms/djangoapps/discussion/rest_api/tests/test_serializers.py b/lms/djangoapps/discussion/rest_api/tests/test_serializers.py index 0cbcc0bebdd1..a1443252a1ce 100644 --- a/lms/djangoapps/discussion/rest_api/tests/test_serializers.py +++ b/lms/djangoapps/discussion/rest_api/tests/test_serializers.py @@ -464,6 +464,7 @@ def test_basic(self): "can_delete": False, "last_edit": None, "edit_by_label": None, + "learner_status": "new", "profile_image": { "has_image": False, "image_url_full": "http://testserver/static/default_500.png", diff --git a/lms/djangoapps/discussion/rest_api/tests/test_views.py b/lms/djangoapps/discussion/rest_api/tests/test_views.py index be8a793abc92..e4d46168c46d 100644 --- a/lms/djangoapps/discussion/rest_api/tests/test_views.py +++ b/lms/djangoapps/discussion/rest_api/tests/test_views.py @@ -1113,6 +1113,7 @@ def setUp(self): {"key": "group_name", "value": None}, {"key": "has_endorsed", "value": False}, {"key": "last_edit", "value": None}, + {"key": "learner_status", "value": "new"}, {"key": "non_endorsed_comment_list_url", "value": None}, {"key": "preview_body", "value": "Test body"}, {"key": "raw_body", "value": "Test body"}, diff --git a/lms/djangoapps/discussion/rest_api/tests/test_views_v2.py b/lms/djangoapps/discussion/rest_api/tests/test_views_v2.py index 4247cbcab06c..431304a9a2b5 100644 --- a/lms/djangoapps/discussion/rest_api/tests/test_views_v2.py +++ b/lms/djangoapps/discussion/rest_api/tests/test_views_v2.py @@ -386,6 +386,7 @@ def expected_response_data(self, overrides=None): "image_url_medium": 
"http://testserver/static/default_50.png", "image_url_small": "http://testserver/static/default_30.png", }, + "learner_status": "new", } response_data.update(overrides or {}) return response_data diff --git a/lms/djangoapps/discussion/rest_api/tests/utils.py b/lms/djangoapps/discussion/rest_api/tests/utils.py index 342afb0ada5e..8c1615690ad5 100644 --- a/lms/djangoapps/discussion/rest_api/tests/utils.py +++ b/lms/djangoapps/discussion/rest_api/tests/utils.py @@ -588,6 +588,7 @@ def expected_thread_data(self, overrides=None): "closed_by_label": None, "close_reason": None, "close_reason_code": None, + "learner_status": "new", } response_data.update(overrides or {}) return response_data @@ -816,6 +817,7 @@ def expected_thread_data(self, overrides=None): "closed_by_label": None, "close_reason": None, "close_reason_code": None, + "learner_status": "new", } response_data.update(overrides or {}) return response_data diff --git a/lms/djangoapps/discussion/rest_api/utils.py b/lms/djangoapps/discussion/rest_api/utils.py index 0f02a0dcdcf2..8914527f1b6a 100644 --- a/lms/djangoapps/discussion/rest_api/utils.py +++ b/lms/djangoapps/discussion/rest_api/utils.py @@ -15,6 +15,7 @@ from common.djangoapps.student.roles import CourseInstructorRole, CourseStaffRole from common.djangoapps.student.models import CourseAccessRole +from completion.models import BlockCompletion from openedx.core.djangoapps.django_comment_common.comment_client.thread import Thread from lms.djangoapps.discussion.config.settings import ENABLE_CAPTCHA_IN_DISCUSSION @@ -496,3 +497,86 @@ def get_captcha_site_key_by_platform(platform: str) -> str | None: Get reCAPTCHA site key based on the platform. """ return settings.RECAPTCHA_SITE_KEYS.get(platform, None) + + +def _is_privileged_user(user, course_id): + """ + Check if a user has privileged roles (staff, moderator, TA, etc.) in the course. + + This helper function checks both forum roles and course access roles to determine + if a user should be considered privileged. + + Args: + user: User object to check + course_id: Course key to check roles in + + Returns: + bool: True if user has any privileged role, False otherwise + """ + # Check forum-specific privileged roles + user_roles = get_user_role_names(user, course_id) + privileged_roles = { + FORUM_ROLE_ADMINISTRATOR, + FORUM_ROLE_MODERATOR, + FORUM_ROLE_COMMUNITY_TA, + FORUM_ROLE_GROUP_MODERATOR + } + + if any(role in privileged_roles for role in user_roles): + return True + + # Check for staff roles using CourseAccessRole + # Include limited_staff for consistency with is_only_student check + return CourseAccessRole.objects.filter( + user=user, + course_id=course_id, + role__in=['instructor', 'staff', 'limited_staff'] + ).exists() + + +def _check_user_engagement(user, course_id): + """ + Returns True if the user shows meaningful engagement: + - Completed ≥ 2 blocks, or + - Completed at least 1 video or 1 problem. + """ + try: + completed = BlockCompletion.objects.filter( + user=user, context_key=course_id, completion=1.0 + ) + return ( + completed.count() >= 2 + or completed.filter(block_type__in=["video", "problem"]).exists() + ) + except (AttributeError, TypeError, ValueError): + return False + + +def get_user_learner_status(user, course_id): + """ + Determine a user's learner status in the given course. 
+ + Possible return values: + - "anonymous" → User not logged in + - "staff" → Staff/moderator/TA + - "new" → Enrolled but no engagement + - "regular" → Enrolled and has engaged with course content + + Args: + user (User): Django user object + course_id (CourseKey): Course key to check engagement in + + Returns: + str: One of ["anonymous", "staff", "new", "regular"] + """ + # Anonymous user + if not user or not user.is_authenticated: + return "anonymous" + + # Privileged user (staff/moderator/TA) + if _is_privileged_user(user, course_id): + return "staff" + + # Engagement-based learner type + has_engagement = _check_user_engagement(user, course_id) + return "regular" if has_engagement else "new" diff --git a/lms/djangoapps/discussion/views.py b/lms/djangoapps/discussion/views.py index c01fb24b073a..7dfdaa896413 100644 --- a/lms/djangoapps/discussion/views.py +++ b/lms/djangoapps/discussion/views.py @@ -166,6 +166,7 @@ def get_threads(request, course, user_info, discussion_id=None, per_page=THREADS 'flagged', 'unread', 'unanswered', + 'context', ] ) ) diff --git a/lms/djangoapps/grades/subsection_grade.py b/lms/djangoapps/grades/subsection_grade.py index 4ce0a1f3a463..b0c98497b823 100644 --- a/lms/djangoapps/grades/subsection_grade.py +++ b/lms/djangoapps/grades/subsection_grade.py @@ -5,8 +5,8 @@ from abc import ABCMeta from collections import OrderedDict +from datetime import datetime, timezone from logging import getLogger - from lazy import lazy from lms.djangoapps.grades.models import BlockRecord, PersistentSubsectionGrade @@ -59,6 +59,13 @@ def show_grades(self, has_staff_access): """ Returns whether subsection scores are currently available to users with or without staff access. """ + if self.show_correctness == ShowCorrectness.NEVER_BUT_INCLUDE_GRADE: + # show_grades fn is used to determine if the grade should be included in final calculation. + # For NEVER_BUT_INCLUDE_GRADE, show_grades returns True if the due date has passed, + # but correctness_available always returns False as we do not want to show correctness + # of problems to the users. + return (self.due is None or + self.due < datetime.now(timezone.utc)) return ShowCorrectness.correctness_available(self.show_correctness, self.due, has_staff_access) @property diff --git a/lms/djangoapps/staticbook/tests.py b/lms/djangoapps/staticbook/tests.py index 919ee16c4fef..a4e0d09bf096 100644 --- a/lms/djangoapps/staticbook/tests.py +++ b/lms/djangoapps/staticbook/tests.py @@ -138,7 +138,7 @@ def test_book_chapter(self): url = self.make_url('pdf_book', book_index=0, chapter=2) response = self.client.get(url) self.assertContains(response, "Chapter 2 for PDF") - self.assertContains(response, "file={}".format(PDF_BOOK['chapters'][1]['url'])) + self.assertNotContains(response, "file={}".format(PDF_BOOK['chapters'][1]['url'])) self.assertNotContains(response, "page=") def test_book_page(self): @@ -148,7 +148,7 @@ def test_book_page(self): response = self.client.get(url) self.assertContains(response, "Chapter 1 for PDF") self.assertNotContains(response, "options.chapterNum =") - self.assertContains(response, "page=17") + self.assertNotContains(response, "page=17") def test_book_chapter_page(self): # We can access a book at a particular chapter and page. 
@@ -156,8 +156,8 @@ def test_book_chapter_page(self): url = self.make_url('pdf_book', book_index=0, chapter=2, page=17) response = self.client.get(url) self.assertContains(response, "Chapter 2 for PDF") - self.assertContains(response, "file={}".format(PDF_BOOK['chapters'][1]['url'])) - self.assertContains(response, "page=17") + self.assertNotContains(response, "file={}".format(PDF_BOOK['chapters'][1]['url'])) + self.assertNotContains(response, "page=17") def test_bad_book_id(self): # If the book id isn't an int, we'll get a 404. diff --git a/lms/djangoapps/staticbook/views.py b/lms/djangoapps/staticbook/views.py index 2e6d1c9a8ed5..a9a6e922fa08 100644 --- a/lms/djangoapps/staticbook/views.py +++ b/lms/djangoapps/staticbook/views.py @@ -86,13 +86,15 @@ def pdf_index(request, course_id, book_index, chapter=None, page=None): raise Http404(f"Invalid book index value: {book_index}") textbook = course.pdf_textbooks[book_index] - viewer_params = '&file=' + viewer_params = '' current_url = '' if 'url' in textbook: textbook['url'] = remap_static_url(textbook['url'], course) - viewer_params += textbook['url'] current_url = textbook['url'] + if not current_url.startswith(('http://', 'https://')): + viewer_params = '&file=' + viewer_params += current_url # then remap all the chapter URLs as well, if they are provided. current_chapter = None @@ -103,14 +105,20 @@ def pdf_index(request, course_id, book_index, chapter=None, page=None): current_chapter = textbook['chapters'][int(chapter) - 1] else: current_chapter = textbook['chapters'][0] - viewer_params += current_chapter['url'] + current_url = current_chapter['url'] + if not current_url.startswith(('http://', 'https://')): + viewer_params = '&file=' + viewer_params += current_url viewer_params += '#zoom=page-fit&disableRange=true' if page is not None: viewer_params += f'&page={page}' - if request.GET.get('viewer', '') == 'true': + if current_url.startswith('https://'): + current_url = '' + template = 'static_pdfbook.html' + elif request.GET.get('viewer', '') == 'true': template = 'pdf_viewer.html' else: template = 'static_pdfbook.html' diff --git a/lms/templates/courseware/progress.html b/lms/templates/courseware/progress.html index 711fad895427..3ee4044fcbbf 100644 --- a/lms/templates/courseware/progress.html +++ b/lms/templates/courseware/progress.html @@ -16,6 +16,7 @@ from lms.djangoapps.grades.api import constants as grades_constants from openedx.core.djangolib.markup import HTML, Text from openedx.features.enterprise_support.utils import get_enterprise_learner_generic_name +from xmodule.graders import ShowCorrectness %> <% @@ -180,7 +181,7 @@${section.display_name} - %if (total > 0 or earned > 0) and section.show_grades(staff_access): + %if (total > 0 or earned > 0) and ShowCorrectness.correctness_available(section.show_correctness, section.due, staff_access): ${_("{earned} of {total} possible points").format(earned='{:.3n}'.format(float(earned)), total='{:.3n}'.format(float(total)))} @@ -189,14 +190,14 @@
${button_text or _('View resource in a new window')} + External link
% else: diff --git a/lms/templates/static_pdfbook.html b/lms/templates/static_pdfbook.html index 813db47a2da0..24e152f06e0e 100644 --- a/lms/templates/static_pdfbook.html +++ b/lms/templates/static_pdfbook.html @@ -47,15 +47,19 @@ %endif
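# Illustrative sketch (not part of this change set): a standalone restatement of
# the pdf_index behaviour introduced above, under the assumption that the intent
# is to keep passing course-hosted PDF paths to the embedded viewer via
# ``&file=`` while no longer embedding absolute http(s) URLs, which instead fall
# through to static_pdfbook.html as external links. ``build_viewer_params`` is a
# hypothetical helper, not a function in the codebase.

def build_viewer_params(pdf_url: str, page: int | None = None) -> str:
    """Build the viewer query fragment, embedding only non-absolute URLs."""
    params = ''
    if not pdf_url.startswith(('http://', 'https://')):
        params = '&file=' + pdf_url
    params += '#zoom=page-fit&disableRange=true'
    if page is not None:
        params += f'&page={page}'
    return params


# Course-relative asset: still embedded in the viewer.
assert build_viewer_params('/static/book.pdf', page=17) == (
    '&file=/static/book.pdf#zoom=page-fit&disableRange=true&page=17'
)
# Absolute URL: no ``&file=`` parameter is emitted.
assert build_viewer_params('https://cdn.example.com/book.pdf').startswith('#zoom=')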