diff --git a/files/classes/comment.py b/files/classes/comment.py index ab1ee92d0..8f02f0456 100644 --- a/files/classes/comment.py +++ b/files/classes/comment.py @@ -1,16 +1,16 @@ +import math from typing import TYPE_CHECKING, Literal, Optional from urllib.parse import parse_qs, urlencode, urlparse from flask import g -import math from sqlalchemy import * from sqlalchemy.orm import relationship from files.classes.base import CreatedBase -from files.classes.visstate import StateMod, StateReport +from files.classes.visstate import StateMod, StateReport, VisibilityState from files.helpers.config.const import * from files.helpers.config.environment import SCORE_HIDING_TIME_HOURS, SITE_FULL -from files.helpers.content import (ModerationState, body_displayed, +from files.helpers.content import (body_displayed, execute_shadowbanned_fake_votes) from files.helpers.lazy import lazy from files.helpers.math import clamp @@ -420,22 +420,23 @@ class Comment(CreatedBase): @lazy def show_descendants(self, v:"User | None") -> bool: - if self.moderation_state.is_visible_to(v, getattr(self, 'is_blocking', False)): + if self.visibility_state.is_visible_to(v, getattr(self, 'is_blocking', False)): return True return bool(self.descendant_count) @lazy - def visibility_state(self, v:"User | None") -> tuple[bool, str]: + def visibility_and_message(self, v:"User | None") -> tuple[bool, str]: ''' Returns a tuple of whether this content is visible and a publicly visible message to accompany it. The visibility state machine is a slight mess but... this should at least unify the state checks. 
''' - return self.moderation_state.visibility_state(v, getattr(self, 'is_blocking', False)) + return self.visibility_state.visibility_and_message( + v, getattr(self, 'is_blocking', False)) @property - def moderation_state(self) -> ModerationState: - return ModerationState.from_submittable(self) + def visibility_state(self) -> VisibilityState: + return VisibilityState.from_submittable(self) def volunteer_janitor_is_unknown(self): return self.volunteer_janitor_badness > 0.4 and self.volunteer_janitor_badness < 0.6 diff --git a/files/classes/cron/submission.py b/files/classes/cron/submission.py index 6cc6e387b..89155b35c 100644 --- a/files/classes/cron/submission.py +++ b/files/classes/cron/submission.py @@ -8,9 +8,9 @@ from sqlalchemy.sql.sqltypes import Boolean, Integer, String, Text from files.classes.cron.tasks import (RepeatableTask, ScheduledTaskType, TaskRunContext) from files.classes.submission import Submission -from files.classes.visstate import StateMod +from files.classes.visstate import StateMod, StateReport, VisibilityState from files.helpers.config.const import SUBMISSION_TITLE_LENGTH_MAXIMUM -from files.helpers.content import ModerationState, body_displayed +from files.helpers.content import body_displayed from files.helpers.lazy import lazy from files.helpers.sanitize import filter_emojis_only @@ -173,13 +173,12 @@ class ScheduledSubmissionTask(RepeatableTask): return f"/tasks/scheduled_posts/{self.id}/content" @property - def moderation_state(self) -> ModerationState: - return ModerationState( - removed=False, - removed_by_name=None, + def visibility_state(self) -> VisibilityState: + return VisibilityState( + state_mod=StateMod.VISIBLE, + state_mod_set_by=None, + state_report=StateReport.UNREPORTED, deleted=False, # we only want to show deleted UI color if disabled - reports_ignored=False, - filtered=False, op_shadowbanned=False, op_id=self.author_id_submission, op_name_safe=self.author_name diff --git a/files/classes/submission.py 
b/files/classes/submission.py index c9aad16a2..fe565effb 100644 --- a/files/classes/submission.py +++ b/files/classes/submission.py @@ -6,13 +6,13 @@ from sqlalchemy.orm import Session, declared_attr, deferred, relationship from files.classes.base import CreatedBase from files.classes.flags import Flag -from files.classes.visstate import StateMod, StateReport +from files.classes.visstate import StateMod, StateReport, VisibilityState from files.classes.votes import Vote from files.helpers.assetcache import assetcache_path from files.helpers.config.const import * from files.helpers.config.environment import (SCORE_HIDING_TIME_HOURS, SITE, SITE_FULL, SITE_ID) -from files.helpers.content import ModerationState, body_displayed +from files.helpers.content import body_displayed from files.helpers.lazy import lazy from files.helpers.time import format_age, format_datetime @@ -357,5 +357,5 @@ class Submission(CreatedBase): return f"/edit_post/{self.id}" @property - def moderation_state(self) -> ModerationState: - return ModerationState.from_submittable(self) + def visibility_state(self) -> VisibilityState: + return VisibilityState.from_submittable(self) diff --git a/files/classes/visstate.py b/files/classes/visstate.py index f6e2ed852..50d1b754f 100644 --- a/files/classes/visstate.py +++ b/files/classes/visstate.py @@ -1,5 +1,15 @@ +from __future__ import annotations import enum +from dataclasses import dataclass +from typing import TYPE_CHECKING + +from files.helpers.config.const import PERMS + +if TYPE_CHECKING: + from files.classes.user import User + from files.helpers.content import Submittable + class StateMod(enum.Enum): VISIBLE = 0 @@ -11,3 +21,106 @@ class StateReport(enum.Enum): RESOLVED = 1 REPORTED = 2 IGNORED = 3 + + +@dataclass(frozen=True, kw_only=True, slots=True) +class VisibilityState: + ''' + The full moderation state machine. It holds the moderation state, report + state, deleted information, and shadowban information.
A decision to show + or hide a post or comment should be able to be done with information from + this alone. + ''' + state_mod: StateMod + state_mod_set_by: str | None + state_report: StateReport + deleted: bool + op_shadowbanned: bool + op_id: int + op_name_safe: str + + @property + def removed(self) -> bool: + return self.state_mod == StateMod.REMOVED + + @property + def filtered(self) -> bool: + return self.state_mod == StateMod.FILTERED + + @property + def reports_ignored(self) -> bool: + return self.state_report == StateReport.IGNORED + + @classmethod + def from_submittable(cls, target: Submittable) -> "VisibilityState": + return cls( + state_mod=target.state_mod, + state_mod_set_by=target.state_mod_set_by, # type: ignore + state_report=target.state_report, + deleted=bool(target.state_user_deleted_utc != None), + op_shadowbanned=bool(target.author.shadowbanned), + op_id=target.author_id, # type: ignore + op_name_safe=target.author_name + ) + + def moderated_body(self, v: User | None) -> str | None: + if v and (v.admin_level >= PERMS['POST_COMMENT_MODERATION'] \ + or v.id == self.op_id): + return None + if self.deleted: return 'Deleted' + if self.appear_removed(v): return 'Removed' + if self.filtered: return 'Filtered' + return None + + def visibility_and_message(self, v: User | None, is_blocking: bool) -> tuple[bool, str]: + ''' + Returns a tuple of whether this content is visible and a publicly + visible message to accompany it. The visibility state machine is + a slight mess but... this should at least unify the state checks. + ''' + def can(v: User | None, perm_level: int) -> bool: + return v and v.admin_level >= perm_level + + can_moderate: bool = can(v, PERMS['POST_COMMENT_MODERATION']) + can_shadowban: bool = can(v, PERMS['USER_SHADOWBAN']) + + if v and v.id == self.op_id: + return True, "This shouldn't be here, please report it!"
+ if (self.removed and not can_moderate) or \ + (self.op_shadowbanned and not can_shadowban): + msg: str = 'Removed' + if self.state_mod_set_by: + msg = f'Removed by @{self.state_mod_set_by}' + return False, msg + if self.filtered and not can_moderate: + return False, 'Filtered' + if self.deleted and not can_moderate: + return False, 'Deleted by author' + if is_blocking: + return False, f'You are blocking @{self.op_name_safe}' + return True, "This shouldn't be here, please report it!" + + def is_visible_to(self, v: User | None, is_blocking: bool) -> bool: + return self.visibility_and_message(v, is_blocking)[0] + + def replacement_message(self, v: User | None, is_blocking: bool) -> str: + return self.visibility_and_message(v, is_blocking)[1] + + def appear_removed(self, v: User | None) -> bool: + if self.removed: return True + if not self.op_shadowbanned: return False + return (not v) or bool(v.admin_level < PERMS['USER_SHADOWBAN']) + + @property + def publicly_visible(self) -> bool: + return all( + not state for state in + [self.deleted, self.removed, self.filtered, self.op_shadowbanned] + ) + + @property + def explicitly_moderated(self) -> bool: + ''' + Whether this was removed or filtered and not as the result of a shadowban + ''' + return self.removed or self.filtered diff --git a/files/helpers/content.py b/files/helpers/content.py index 6ad452ab2..447acaf53 100644 --- a/files/helpers/content.py +++ b/files/helpers/content.py @@ -2,14 +2,10 @@ from __future__ import annotations import random import urllib.parse -from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Optional from sqlalchemy.orm import Session -from files.helpers.config.const import PERMS -from files.classes.visstate import StateMod, StateReport - if TYPE_CHECKING: from files.classes import Comment, Submission, User Submittable = Comment | Submission @@ -90,100 +86,8 @@ def canonicalize_url2(url:str, *, httpsify:bool=False) -> urllib.parse.ParseResu return url_parsed 
-@dataclass(frozen=True, kw_only=True, slots=True) -class ModerationState: - ''' - The moderation state machine. This holds moderation state information, - including whether this was removed, deleted, filtered, whether OP was - shadowbanned, etc - ''' - removed: bool - removed_by_name: str | None - deleted: bool - reports_ignored: bool - filtered: bool - op_shadowbanned: bool - op_id: int - op_name_safe: str - - @classmethod - def from_submittable(cls, target: Submittable) -> "ModerationState": - return cls( - removed=bool(target.state_mod != StateMod.VISIBLE), - removed_by_name=target.state_mod_set_by, # type: ignore - deleted=bool(target.state_user_deleted_utc != None), - reports_ignored=bool(target.state_report == StateReport.IGNORED), - filtered=bool(target.state_mod == StateMod.FILTERED), - op_shadowbanned=bool(target.author.shadowbanned), - op_id=target.author_id, # type: ignore - op_name_safe=target.author_name - ) - - def moderated_body(self, v: User | None) -> str | None: - if v and (v.admin_level >= PERMS['POST_COMMENT_MODERATION'] \ - or v.id == self.op_id): - return None - if self.deleted: return 'Deleted' - if self.appear_removed(v): return 'Removed' - if self.filtered: return 'Filtered' - return None - - def visibility_state(self, v: User | None, is_blocking: bool) -> tuple[bool, str]: - ''' - Returns a tuple of whether this content is visible and a publicly - visible message to accompany it. The visibility state machine is - a slight mess but... this should at least unify the state checks. - ''' - def can(v: User | None, perm_level: int) -> bool: - return v and v.admin_level >= perm_level - - can_moderate: bool = can(v, PERMS['POST_COMMENT_MODERATION']) - can_shadowban: bool = can(v, PERMS['USER_SHADOWBAN']) - - if v and v.id == self.op_id: - return True, "This shouldn't be here, please report it!" 
- if (self.removed and not can_moderate) or \ - (self.op_shadowbanned and not can_shadowban): - msg: str = 'Removed' - if self.removed_by_name: - msg = f'Removed by @{self.removed_by_name}' - return False, msg - if self.filtered and not can_moderate: - return False, 'Filtered' - if self.deleted and not can_moderate: - return False, 'Deleted by author' - if is_blocking: - return False, f'You are blocking @{self.op_name_safe}' - return True, "This shouldn't be here, please report it!" - - def is_visible_to(self, v: User | None, is_blocking: bool) -> bool: - return self.visibility_state(v, is_blocking)[0] - - def replacement_message(self, v: User | None, is_blocking: bool) -> str: - return self.visibility_state(v, is_blocking)[1] - - def appear_removed(self, v: User | None) -> bool: - if self.removed: return True - if not self.op_shadowbanned: return False - return (not v) or bool(v.admin_level < PERMS['USER_SHADOWBAN']) - - @property - def publicly_visible(self) -> bool: - return all( - not state for state in - [self.deleted, self.removed, self.filtered, self.op_shadowbanned] - ) - - @property - def explicitly_moderated(self) -> bool: - ''' - Whether this was removed or filtered and not as the result of a shadowban - ''' - return self.removed or self.filtered - - def body_displayed(target:Submittable, v:Optional[User], is_html:bool) -> str: - moderated:Optional[str] = target.moderation_state.moderated_body(v) + moderated:Optional[str] = target.visibility_state.moderated_body(v) if moderated: return moderated body = target.body_html if is_html else target.body diff --git a/files/templates/comments.html b/files/templates/comments.html index fb34992bc..7e659356c 100644 --- a/files/templates/comments.html +++ b/files/templates/comments.html @@ -11,7 +11,7 @@ {%- set downs = c.downvotes_str(render_ctx) -%} {% set replies = c.replies(v) %} -{% if not c.visibility_state(v)[0] %} +{% if not c.visibility_and_message(v)[0] %} {% if c.show_descendants(v) %}