Prevent duplicate logging setup when running multiple Synapse instances (#19067)

Be mindful that it's possible to run Synapse multiple times in the same
Python process, so some parts of the logging setup only need to happen once
per process (a minimal sketch of the guard pattern follows the list):

- We only need to set up the global log record factory and context filter
  once
- We only need to redirect Twisted logging once
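
The gist of the change is a module-level "already done" flag around the
process-global pieces. A minimal sketch of that guard pattern (illustrative
only; `setup_instance_logging` below is a hypothetical name, not the real
Synapse function):

```python
from typing import Optional

_already_performed_one_time_logging_setup = False


def one_time_logging_setup() -> None:
    """Process-wide logging setup; every call after the first is a no-op."""
    global _already_performed_one_time_logging_setup
    if _already_performed_one_time_logging_setup:
        return

    # ... install the global log record factory / context filter and redirect
    # Twisted logging into stdlib logging here (only safe once per process) ...

    _already_performed_one_time_logging_setup = True


def setup_instance_logging(log_config_path: Optional[str]) -> None:
    """Per-instance configuration; safe to call for every Synapse instance."""
    one_time_logging_setup()
    # ... apply this instance's handlers, formatters and log levels ...
```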


### Background

As part of Element's plan to support a light form of vhosting (virtual
hosting), i.e. running multiple instances of Synapse in the same Python
process, we're currently diving into the details and implications of that
setup.

"Per-tenant logging" tracked internally by
https://github.com/element-hq/synapse-small-hosts/issues/48
Commit 2c4057bf93 (parent f54ddbcace), authored by Eric Eastwood on
2025-10-30 10:21:56 -05:00 and committed by GitHub.
3 changed files with 60 additions and 33 deletions

changelog.d/19067.misc (new file)

@@ -0,0 +1 @@
+Prevent duplicate logging setup when running multiple Synapse instances.

synapse/config/logger.py

@@ -198,12 +198,27 @@ class LoggingConfig(Config):
         log_config_file.write(DEFAULT_LOG_CONFIG.substitute(log_file=log_file))
-def _setup_stdlib_logging(
-    config: "HomeServerConfig", log_config_path: Optional[str], logBeginner: LogBeginner
-) -> None:
+_already_performed_one_time_logging_setup: bool = False
+"""
+Marks whether we've already successfully ran `one_time_logging_setup()`.
+"""
+def one_time_logging_setup(*, logBeginner: LogBeginner = globalLogBeginner) -> None:
     """
-    Set up Python standard library logging.
+    Perform one-time logging configuration for the Python process.
+    For example, we don't need to have multiple log record factories. Once we've
+    configured it once, we don't need to do it again.
+    This matters because multiple Synapse instances can be run in the same Python
+    process (c.f. Synapse Pro for small hosts)
     """
+    global _already_performed_one_time_logging_setup
+    # We only need to set things up once.
+    if _already_performed_one_time_logging_setup:
+        return
     # We add a log record factory that runs all messages through the
     # LoggingContextFilter so that we get the context *at the time we log*
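
To illustrate the record-factory technique referenced in the comment above,
here is a stdlib-only sketch; `RequestContextFilter` is a hypothetical
stand-in for Synapse's `LoggingContextFilter`, not the real class:

```python
import logging
from typing import Any


class RequestContextFilter(logging.Filter):
    """Hypothetical stand-in: ensure every record carries a `request`
    attribute so format strings containing %(request)s never fail."""

    def filter(self, record: logging.LogRecord) -> bool:
        if not hasattr(record, "request"):
            record.request = "unknown"
        return True


_context_filter = RequestContextFilter()
_old_factory = logging.getLogRecordFactory()


def _factory(*args: Any, **kwargs: Any) -> logging.LogRecord:
    # Build the record as usual, then stamp the context onto it *at creation
    # time*, so the information is captured when the message is logged.
    record = _old_factory(*args, **kwargs)
    _context_filter.filter(record)
    return record


# setLogRecordFactory() mutates process-global state; installing the factory
# twice would wrap the wrapper, which is why this lives in the one-time path.
logging.setLogRecordFactory(_factory)
```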
@@ -221,26 +236,6 @@ def _setup_stdlib_logging(
     logging.setLogRecordFactory(factory)
-    # Configure the logger with the initial configuration.
-    if log_config_path is None:
-        log_format = (
-            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
-            " - %(message)s"
-        )
-        logger = logging.getLogger("")
-        logger.setLevel(logging.INFO)
-        logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO)
-        formatter = logging.Formatter(log_format)
-        handler = logging.StreamHandler()
-        handler.setFormatter(formatter)
-        logger.addHandler(handler)
-    else:
-        # Load the logging configuration.
-        _load_logging_config(log_config_path)
     # Route Twisted's native logging through to the standard library logging
     # system.
     observer = STDLibLogObserver()
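
The other process-global piece is the Twisted-to-stdlib redirect that the
trailing context lines above lead into. A minimal sketch using Twisted's
public API (the real code, as the next hunk shows, hands a `_log` wrapper to
`beginLoggingTo` rather than the bare observer):

```python
from twisted.logger import STDLibLogObserver, globalLogBeginner

# Route Twisted's native logging into the stdlib `logging` module. This is
# process-global state: repeating it for every Synapse instance in the same
# process would stack observers and duplicate log lines, hence the guard.
observer = STDLibLogObserver()
globalLogBeginner.beginLoggingTo([observer], redirectStandardIO=False)
```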
@@ -281,6 +276,36 @@ def _setup_stdlib_logging(
     logBeginner.beginLoggingTo([_log], redirectStandardIO=False)
+    _already_performed_one_time_logging_setup = True
+def _setup_stdlib_logging(
+    config: "HomeServerConfig", log_config_path: Optional[str]
+) -> None:
+    """
+    Set up Python standard library logging.
+    """
+    # Configure the logger with the initial configuration.
+    if log_config_path is None:
+        log_format = (
+            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
+            " - %(message)s"
+        )
+        logger = logging.getLogger("")
+        logger.setLevel(logging.INFO)
+        logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO)
+        formatter = logging.Formatter(log_format)
+        handler = logging.StreamHandler()
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
+    else:
+        # Load the logging configuration.
+        _load_logging_config(log_config_path)
 def _load_logging_config(log_config_path: str) -> None:
     """
@@ -318,19 +343,14 @@ def setup_logging(
hs: "HomeServer",
config: "HomeServerConfig",
use_worker_options: bool = False,
logBeginner: LogBeginner = globalLogBeginner,
) -> None:
"""
Set up the logging subsystem.
Args:
config: configuration data
use_worker_options: True to use the 'worker_log_config' option
instead of 'log_config'.
logBeginner: The Twisted logBeginner to use.
"""
from twisted.internet import reactor
@@ -341,11 +361,17 @@ def setup_logging(
     )
+    # Perform one-time logging configuration.
-    _setup_stdlib_logging(config, log_config_path, logBeginner=logBeginner)
+    one_time_logging_setup()
+    # Configure logging.
+    _setup_stdlib_logging(config, log_config_path)
     # Add a SIGHUP handler to reload the logging configuration, if one is available.
     from synapse.app import _base as appbase
-    appbase.register_sighup(hs, _reload_logging_config, log_config_path)
+    # We only need to reload the config if there is a log config file path provided to
+    # reload from.
+    if log_config_path:
+        appbase.register_sighup(hs, _reload_logging_config, log_config_path)
     # Log immediately so we can grep backwards.
     logger.warning("***** STARTING SERVER *****")


@@ -33,7 +33,7 @@ from twisted.internet.protocol import ServerFactory
 from twisted.logger import LogBeginner, LogPublisher
 from twisted.protocols.basic import LineOnlyReceiver
-from synapse.config.logger import _setup_stdlib_logging
+from synapse.config.logger import _setup_stdlib_logging, one_time_logging_setup
 from synapse.logging import RemoteHandler
 from synapse.synapse_rust import reset_logging_config
 from synapse.types import ISynapseReactor
@@ -115,10 +115,10 @@ async def main(reactor: ISynapseReactor, loops: int) -> float:
     }
     logger = logging.getLogger("synapse")
+    one_time_logging_setup(logBeginner=beginner)
     _setup_stdlib_logging(
         hs_config,  # type: ignore[arg-type]
         None,
-        logBeginner=beginner,
     )
     # Force a new logging config without having to load it from a file.
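
For a benchmark or test harness like the one in the last file above, the
one-time setup can be pointed at a private `LogBeginner` so that redirecting
Twisted logging does not disturb the global log state. A hedged sketch of
constructing one (the constructor arguments shown, i.e. publisher, error
stream, stdio module and warnings module, are an assumption about
`twisted.logger.LogBeginner`, so treat this as illustrative):

```python
import sys
import warnings

from twisted.logger import LogBeginner, LogPublisher

# A private publisher/beginner pair for a harness that keeps its log
# observers separate from the process-global ones.
publisher = LogPublisher()
# Assumed constructor shape: (publisher, error stream, stdio module, warnings module).
beginner = LogBeginner(publisher, sys.stderr, sys, warnings)

# one_time_logging_setup(logBeginner=beginner) would then begin logging via
# this beginner instead of twisted.logger.globalLogBeginner.
```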