commit 7f9f673ee9a54d25113ba0e582b50f4e9b867ab0 Author: Jiayi Yin Date: Tue Jun 6 18:32:16 2023 +0800 Initial commit diff --git a/Change-length-limit-of-hostname-from-255-to-64.patch b/Change-length-limit-of-hostname-from-255-to-64.patch new file mode 100644 index 0000000..ecbab3c --- /dev/null +++ b/Change-length-limit-of-hostname-from-255-to-64.patch @@ -0,0 +1,27 @@ +From f4a9c3a8588ec5a6563d76a0a4d319e078912f01 Mon Sep 17 00:00:00 2001 +From: sherlock2010 <15151851377@163.com> +Date: Thu, 10 Dec 2020 14:26:12 +0800 +Subject: [PATCH] Change length limit of hostname from 255 to 64 + +--- + pyanaconda/network.py | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/pyanaconda/network.py b/pyanaconda/network.py +index 6aab84a..00ccd0a 100644 +--- a/pyanaconda/network.py ++++ b/pyanaconda/network.py +@@ -109,8 +109,8 @@ def is_valid_hostname(hostname, local=False): + if not hostname: + return (False, _("Host name cannot be None or an empty string.")) + +- if len(hostname) > 255: +- return (False, _("Host name must be 255 or fewer characters in length.")) ++ if len(hostname) > 64: ++ return (False, _("Host name must be 64 or fewer characters in length.")) + + if local and hostname[-1] == ".": + return (False, _("Local host name must not end with period '.'.")) +-- +1.8.3.1 + diff --git a/Change-topbar-background-size.patch b/Change-topbar-background-size.patch new file mode 100644 index 0000000..c2ea224 --- /dev/null +++ b/Change-topbar-background-size.patch @@ -0,0 +1,13 @@ +diff -uNrp a/data/anaconda-gtk.css b/data/anaconda-gtk.css +--- a/data/anaconda-gtk.css 2019-08-21 18:50:27.188000000 +0800 ++++ b/data/anaconda-gtk.css 2019-08-21 18:52:23.172000000 +0800 +@@ -112,7 +112,8 @@ levelbar.discrete trough block.filled.hi + AnacondaSpokeWindow #nav-box { + background-color: @product_bg_color; + background-image: url('/usr/share/anaconda/pixmaps/topbar-bg.png'); +- background-repeat: repeat; ++ background-repeat: no-repeat; ++ background-size: 
100% 100%; + color: white; + } + diff --git a/Fix-hiding-of-network-device-activation-switch.patch b/Fix-hiding-of-network-device-activation-switch.patch new file mode 100644 index 0000000..f13aa04 --- /dev/null +++ b/Fix-hiding-of-network-device-activation-switch.patch @@ -0,0 +1,29 @@ +From 3216e5fc1c39354e66c977f76465303ea2a11859 Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Thu, 18 Jun 2020 16:45:27 +0800 +Subject: [PATCH] Fix hiding of network device activation switch + +--- + pyanaconda/ui/gui/spokes/network.py | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/pyanaconda/ui/gui/spokes/network.py b/pyanaconda/ui/gui/spokes/network.py +index d6c1e4b..1318e17 100644 +--- a/pyanaconda/ui/gui/spokes/network.py ++++ b/pyanaconda/ui/gui/spokes/network.py +@@ -1019,8 +1019,10 @@ class NetworkControlBox(GObject.GObject): + + switch = self.builder.get_object("device_%s_off_switch" % dev_type_str) + if dev_type_str == "wired": +- switch.set_visible(state not in (NM.DeviceState.UNAVAILABLE, +- NM.DeviceState.UNMANAGED)) ++ visible = state not in (NM.DeviceState.UNAVAILABLE, NM.DeviceState.UNMANAGED) ++ switch.set_visible(visible) ++ if not visible: ++ switch.set_no_show_all(True) + self._updating_device = True + switch.set_active(state not in (NM.DeviceState.UNMANAGED, + NM.DeviceState.UNAVAILABLE, +-- +2.23.0 + diff --git a/add-boot-args-for-smmu-and-video.patch b/add-boot-args-for-smmu-and-video.patch new file mode 100644 index 0000000..f4d8fc7 --- /dev/null +++ b/add-boot-args-for-smmu-and-video.patch @@ -0,0 +1,31 @@ +From 99970253a5600555118a64046259b7ef638655df Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Fri, 11 Sep 2020 11:26:41 +0800 +Subject: [PATCH] add boot args for smmu and video + +--- + pyanaconda/modules/storage/bootloader/grub2.py | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/pyanaconda/modules/storage/bootloader/grub2.py b/pyanaconda/modules/storage/bootloader/grub2.py +index 17e46e5..a70ba7a 100644 +--- 
a/pyanaconda/modules/storage/bootloader/grub2.py ++++ b/pyanaconda/modules/storage/bootloader/grub2.py +@@ -271,6 +271,14 @@ class GRUB2(BootLoader): + if blivet.arch.is_aarch64(): + log.info("check boot args:%s", arg_str) + arg_str += " crashkernel=1024M,high" ++ if "smmu.bypassdev=0x1000:0x17" not in arg_str: ++ arg_str += " smmu.bypassdev=0x1000:0x17" ++ if "smmu.bypassdev=0x1000:0x15" not in arg_str: ++ arg_str += " smmu.bypassdev=0x1000:0x15" ++ if "video=efifb:off" not in arg_str: ++ arg_str += " video=efifb:off" ++ if "video=VGA-1:640x480-32@60me" not in arg_str: ++ arg_str += " video=VGA-1:640x480-32@60me" + else: + arg_str += " crashkernel=512M" + +-- +2.23.0 + diff --git a/add-passwd-policy.patch b/add-passwd-policy.patch new file mode 100644 index 0000000..7450c0d --- /dev/null +++ b/add-passwd-policy.patch @@ -0,0 +1,29 @@ +From e8ce5155f21af58e119c61e10895fcb5d8c21995 Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Thu, 18 Jun 2020 17:00:13 +0800 +Subject: [PATCH] add passwd policy + +--- + data/interactive-defaults.ks | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/data/interactive-defaults.ks b/data/interactive-defaults.ks +index a906c8c..0177cf9 100644 +--- a/data/interactive-defaults.ks ++++ b/data/interactive-defaults.ks +@@ -4,9 +4,9 @@ firstboot --enable + + %anaconda + # Default password policies +-pwpolicy root --notstrict --minlen=6 --minquality=1 --nochanges --notempty +-pwpolicy user --notstrict --minlen=6 --minquality=1 --nochanges --emptyok +-pwpolicy luks --notstrict --minlen=6 --minquality=1 --nochanges --notempty ++pwpolicy root --notstrict --minlen=8 --minquality=1 --nochanges --notempty ++pwpolicy user --notstrict --minlen=8 --minquality=1 --nochanges --emptyok ++pwpolicy luks --notstrict --minlen=8 --minquality=1 --nochanges --notempty + # NOTE: This applies only to *fully* interactive installations, partial kickstart + # installations use defaults specified in pyanaconda/pwpolicy.py. 
+ # Automated kickstart installs simply ignore the password policy as the policy +-- +2.23.0 + diff --git a/anaconda-33.19.tar.bz2 b/anaconda-33.19.tar.bz2 new file mode 100644 index 0000000..d7f7a05 Binary files /dev/null and b/anaconda-33.19.tar.bz2 differ diff --git a/anaconda-Allow-to-detect-devices-with-the-iso9660-file-system.patch b/anaconda-Allow-to-detect-devices-with-the-iso9660-file-system.patch new file mode 100644 index 0000000..146cd2b --- /dev/null +++ b/anaconda-Allow-to-detect-devices-with-the-iso9660-file-system.patch @@ -0,0 +1,127 @@ +From 4e699fe30da24771b80ff1fe64d7791bcb444f79 Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Tue, 21 Jul 2020 10:57:00 +0200 +Subject: [PATCH] Allow to detect devices with the iso9660 file system as + optical media + +Test that the DBus method FindOpticalMedia identifies devices with the iso9660 file +system as optical media, so it is able to find NVDIMM devices with iso9660. + +The DBus method GetDevicesToIgnore of the NVDIMM module shouldn't return NVDIMM +devices with the iso9660 file system. They can be used as an installation source. + +Protect all devices with the iso9660 file system. It will protect, for example, NVDIMM +devices with the iso9660 file system that can be used only as an installation source +anyway. + +Related: rhbz#1856264 +--- + .../modules/storage/devicetree/model.py | 5 ++++ + pyanaconda/modules/storage/nvdimm/nvdimm.py | 12 ++++++++- + .../module_device_tree_test.py | 27 ++++++++++++++++--- + 3 files changed, 39 insertions(+), 5 deletions(-) + +diff --git a/pyanaconda/modules/storage/devicetree/model.py b/pyanaconda/modules/storage/devicetree/model.py +index 4d0ecdb..cdee5a8 100644 +--- a/pyanaconda/modules/storage/devicetree/model.py ++++ b/pyanaconda/modules/storage/devicetree/model.py +@@ -303,6 +303,11 @@ class InstallerStorage(Blivet): + # cdroms, involves unmounting which is undesirable (see bug #1671713). 
+ protected.extend(dev for dev in self.devicetree.devices if dev.type == "cdrom") + ++ # Protect also all devices with an iso9660 file system. It will protect ++ # NVDIMM devices that can be used only as an installation source anyway ++ # (see the bug #1856264). ++ protected.extend(dev for dev in self.devicetree.devices if dev.format.type == "iso9660") ++ + # Mark the collected devices as protected. + for dev in protected: + log.debug("Marking device %s as protected.", dev.name) +diff --git a/pyanaconda/modules/storage/nvdimm/nvdimm.py b/pyanaconda/modules/storage/nvdimm/nvdimm.py +index 0bbcc6e..4476dd1 100644 +--- a/pyanaconda/modules/storage/nvdimm/nvdimm.py ++++ b/pyanaconda/modules/storage/nvdimm/nvdimm.py +@@ -21,6 +21,7 @@ import gi + gi.require_version("BlockDev", "2.0") + from gi.repository import BlockDev as blockdev + ++from blivet import udev + from blivet.static_data import nvdimm + + from pykickstart.constants import NVDIMM_ACTION_RECONFIGURE, NVDIMM_ACTION_USE +@@ -90,6 +91,9 @@ class NVDIMMModule(KickstartBaseModule): + installation, the device(s) must be specified by nvdimm kickstart + command. Also, only devices in sector mode are allowed. + ++ Don't ignore devices that have an iso9660 file system. We might ++ want to use them as an installation source. 
++ + :return: a set of device names + """ + namespaces_to_use = self.get_namespaces_to_use() +@@ -97,7 +101,13 @@ class NVDIMMModule(KickstartBaseModule): + devices_to_ignore = set() + + for ns_name, ns_info in nvdimm.namespaces.items(): +- if ns_info.mode != blockdev.NVDIMMNamespaceMode.SECTOR: ++ info = udev.get_device(device_node="/dev/" + ns_info.blockdev) ++ ++ if info and udev.device_get_format(info) == "iso9660": ++ log.debug("%s / %s won't be ignored - NVDIMM device has " ++ "an iso9660 file system", ns_name, ns_info.blockdev) ++ continue ++ elif ns_info.mode != blockdev.NVDIMMNamespaceMode.SECTOR: + log.debug("%s / %s will be ignored - NVDIMM device is not " + "in sector mode", ns_name, ns_info.blockdev) + elif ns_name not in namespaces_to_use and ns_info.blockdev not in devices_to_use: +diff --git a/tests/nosetests/pyanaconda_tests/module_device_tree_test.py b/tests/nosetests/pyanaconda_tests/module_device_tree_test.py +index 838c70e..5e52843 100644 +--- a/tests/nosetests/pyanaconda_tests/module_device_tree_test.py ++++ b/tests/nosetests/pyanaconda_tests/module_device_tree_test.py +@@ -24,10 +24,10 @@ from unittest.mock import patch, Mock, PropertyMock + from tests.nosetests.pyanaconda_tests import patch_dbus_publish_object, check_task_creation + + from blivet.devices import StorageDevice, DiskDevice, DASDDevice, ZFCPDiskDevice, PartitionDevice, \ +- LUKSDevice, iScsiDiskDevice, NVDIMMNamespaceDevice, FcoeDiskDevice ++ LUKSDevice, iScsiDiskDevice, NVDIMMNamespaceDevice, FcoeDiskDevice, OpticalDevice + from blivet.errors import StorageError, FSError + from blivet.formats import get_format +-from blivet.formats.fs import FS ++from blivet.formats.fs import FS, Iso9660FS + from blivet.formats.luks import LUKS + from blivet.size import Size + +@@ -627,9 +627,28 @@ class DeviceTreeInterfaceTestCase(unittest.TestCase): + str(cm.exception), "Failed to unmount dev1 from /path: Fake error." 
+ ) + +- def find_install_media_test(self): ++ @patch.object(Iso9660FS, "check_module") ++ def find_install_media_test(self, check_module): + """Test FindInstallMedia.""" +- self.assertEqual(self.interface.FindOpticalMedia(), []) ++ dev1 = OpticalDevice("dev1") ++ dev1.size = Size("2 GiB") ++ dev1.format = get_format("iso9660") ++ dev1.controllable = True ++ self._add_device(dev1) ++ ++ dev2 = StorageDevice("dev2") ++ dev2.size = Size("2 GiB") ++ dev2.format = get_format("iso9660") ++ dev2.controllable = True ++ self._add_device(dev2) ++ ++ dev3 = StorageDevice("dev3") ++ dev3.size = Size("2 GiB") ++ dev3.format = get_format("ext4") ++ dev3.controllable = True ++ self._add_device(dev3) ++ ++ self.assertEqual(self.interface.FindOpticalMedia(), ["dev1", "dev2"]) + + @patch.object(FS, "update_size_info") + def find_mountable_partitions_test(self, update_size_info): +-- +2.23.0 + diff --git a/anaconda-Fix-stage2-as-default-sources.patch b/anaconda-Fix-stage2-as-default-sources.patch new file mode 100644 index 0000000..405c8dd --- /dev/null +++ b/anaconda-Fix-stage2-as-default-sources.patch @@ -0,0 +1,642 @@ +From 5283a20d41050551b54d6d12960ac28e0e5e1648 Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Tue, 21 Jul 2020 11:17:40 +0200 +Subject: [PATCH] Fix stage2 as default sources + +We should prioritize stage2 device as the default source. +This is especially needed for DVD ISO because it is booting +with inst.stage2 instead and we should use the DVD +as the source for the default CDROM. +The situation is even worse thanks to the fact that +DVD ISOs are using inst.stage2=hd:... + +Find stage2 device and test this device first during +the auto-discover feature of CDRom source. 
+ +Resolves: rhbz#1856264 +--- + .../modules/payloads/source/cdrom/cdrom.py | 7 +- + .../payloads/source/cdrom/cdrom_interface.py | 7 +- + .../payloads/source/cdrom/initialization.py | 64 +++- + pyanaconda/modules/payloads/source/utils.py | 8 +- + .../modules/storage/devicetree/handler.py | 5 +- + .../storage/devicetree/handler_interface.py | 5 +- + pyanaconda/payload/utils.py | 2 +- + .../module_device_tree_test.py | 23 +- + .../module_source_cdrom_test.py | 286 ++++++++++++++++-- + 9 files changed, 362 insertions(+), 45 deletions(-) + +diff --git a/pyanaconda/modules/payloads/source/cdrom/cdrom.py b/pyanaconda/modules/payloads/source/cdrom/cdrom.py +index 93df362..bb751ae 100644 +--- a/pyanaconda/modules/payloads/source/cdrom/cdrom.py ++++ b/pyanaconda/modules/payloads/source/cdrom/cdrom.py +@@ -32,7 +32,12 @@ log = get_module_logger(__name__) + + + class CdromSourceModule(PayloadSourceBase, MountingSourceMixin, RPMSourceMixin): +- """The CD-ROM source payload module.""" ++ """The CD-ROM source payload module. ++ ++ This source will try to automatically detect installation source. First it tries to look only ++ stage2 device used to boot the environment then it will use first valid iso9660 media with a ++ valid structure. ++ """ + + def __init__(self): + super().__init__() +diff --git a/pyanaconda/modules/payloads/source/cdrom/cdrom_interface.py b/pyanaconda/modules/payloads/source/cdrom/cdrom_interface.py +index 0c5b6d7..74d2f14 100644 +--- a/pyanaconda/modules/payloads/source/cdrom/cdrom_interface.py ++++ b/pyanaconda/modules/payloads/source/cdrom/cdrom_interface.py +@@ -25,7 +25,12 @@ from pyanaconda.modules.payloads.source.source_base_interface import PayloadSour + + @dbus_interface(PAYLOAD_SOURCE_CDROM.interface_name) + class CdromSourceInterface(PayloadSourceBaseInterface): +- """Interface for the payload CD-ROM image source.""" ++ """Interface for the payload CD-ROM image source. ++ ++ This source will try to automatically detect installation source. 
First it tries to look only ++ stage2 device used to boot the environment then it will use first valid iso9660 media with a ++ valid structure. ++ """ + + def connect_signals(self): + super().connect_signals() +diff --git a/pyanaconda/modules/payloads/source/cdrom/initialization.py b/pyanaconda/modules/payloads/source/cdrom/initialization.py +index a182fcd..7fc38fc 100644 +--- a/pyanaconda/modules/payloads/source/cdrom/initialization.py ++++ b/pyanaconda/modules/payloads/source/cdrom/initialization.py +@@ -15,13 +15,15 @@ + # License and may only be used or replicated with the express permission of + # Red Hat, Inc. + # ++from pyanaconda.core.kernel import kernel_arguments + from pyanaconda.modules.common.constants.objects import DEVICE_TREE + from pyanaconda.modules.common.constants.services import STORAGE + from pyanaconda.modules.common.errors.payload import SourceSetupError +-from pyanaconda.modules.payloads.source.mount_tasks import SetUpMountTask + from pyanaconda.modules.common.structures.storage import DeviceData +-from pyanaconda.payload.utils import mount, unmount, PayloadSetupError ++from pyanaconda.modules.payloads.source.mount_tasks import SetUpMountTask + from pyanaconda.modules.payloads.source.utils import is_valid_install_disk ++from pyanaconda.payload.source.factory import SourceFactory, PayloadSourceTypeUnrecognized ++from pyanaconda.payload.utils import mount, unmount, PayloadSetupError + + from pyanaconda.anaconda_loggers import get_module_logger + log = get_module_logger(__name__) +@@ -37,13 +39,62 @@ class SetUpCdromSourceTask(SetUpMountTask): + return "Set up CD-ROM Installation Source" + + def _do_mount(self): +- """Run CD-ROM installation source setup.""" +- log.debug("Trying to detect CD-ROM automatically") ++ """Run CD-ROM installation source setup. + ++ Try to discover installation media and mount that. Device used for booting (inst.stage2) ++ has a priority. 
++ """ ++ log.debug("Trying to detect CD-ROM automatically") + device_tree = STORAGE.get_proxy(DEVICE_TREE) ++ ++ device_candidates = self._get_device_candidate_list(device_tree) ++ device_name = self._choose_installation_device(device_tree, device_candidates) ++ ++ if not device_name: ++ raise SourceSetupError("Found no CD-ROM") ++ ++ return device_name ++ ++ def _get_device_candidate_list(self, device_tree): ++ stage2_device = self._probe_stage2_for_cdrom(device_tree) ++ device_candidates = device_tree.FindOpticalMedia() ++ ++ if stage2_device in device_candidates: ++ device_candidates = [stage2_device] + device_candidates ++ ++ return device_candidates ++ ++ @staticmethod ++ def _probe_stage2_for_cdrom(device_tree): ++ # TODO: This is temporary method which should be moved closer to the inst.repo logic ++ log.debug("Testing if inst.stage2 is a CDROM device") ++ stage2_string = kernel_arguments.get("stage2") ++ ++ if not stage2_string: ++ return None ++ ++ try: ++ source = SourceFactory.parse_repo_cmdline_string(stage2_string) ++ except PayloadSourceTypeUnrecognized: ++ log.warning("Unknown stage2 method: %s", stage2_string) ++ return None ++ ++ # We have HDD here because DVD ISO has inst.stage2=hd:LABEL=.... ++ # TODO: Let's return back support of inst.cdrom= which should work based on the ++ # documentation and use that as inst.stage2 parameter for Pungi ++ if not source.is_harddrive: ++ log.debug("Stage2 can't be used as source %s", stage2_string) ++ return None ++ ++ # We can ignore source.path here because DVD ISOs are not using that. 
++ stage2_device = device_tree.ResolveDevice(source.partition) ++ log.debug("Found possible stage2 default installation source %s", stage2_device) ++ return stage2_device ++ ++ def _choose_installation_device(self, device_tree, devices_candidates): + device_name = "" + +- for dev_name in device_tree.FindOpticalMedia(): ++ for dev_name in devices_candidates: + try: + device_data = DeviceData.from_structure(device_tree.GetDeviceData(dev_name)) + mount(device_data.path, self._target_mount, "iso9660", "ro") +@@ -57,7 +108,4 @@ class SetUpCdromSourceTask(SetUpMountTask): + else: + unmount(self._target_mount) + +- if not device_name: +- raise SourceSetupError("Found no CD-ROM") +- + return device_name +diff --git a/pyanaconda/modules/payloads/source/utils.py b/pyanaconda/modules/payloads/source/utils.py +index b9642a9..5030fc5 100644 +--- a/pyanaconda/modules/payloads/source/utils.py ++++ b/pyanaconda/modules/payloads/source/utils.py +@@ -84,10 +84,10 @@ def find_and_mount_device(device_spec, mount_point): + device_path = "/dev/" + matches[0] + + try: +- # FIXME: Add back RO mounting. This was removed because we can't mount one source +- # RW and RO at the same time. This source is also mounted by IsoChooser dialog in the +- # SourceSpoke. 
+- mount(device_path, mount_point, "auto") ++ mount(device=device_path, ++ mountpoint=mount_point, ++ fstype="auto", ++ options="defaults,ro") + return True + except OSError as e: + log.error("Mount of device failed: %s", e) +diff --git a/pyanaconda/modules/storage/devicetree/handler.py b/pyanaconda/modules/storage/devicetree/handler.py +index 1fca6c0..453f27d 100644 +--- a/pyanaconda/modules/storage/devicetree/handler.py ++++ b/pyanaconda/modules/storage/devicetree/handler.py +@@ -82,16 +82,17 @@ class DeviceTreeHandler(ABC): + msg = "Failed to tear down {}: {}".format(device_name, str(e)) + raise DeviceSetupError(msg) from None + +- def mount_device(self, device_name, mount_point): ++ def mount_device(self, device_name, mount_point, options): + """Mount a filesystem on the device. + + :param device_name: a name of the device + :param mount_point: a path to the mount point ++ :param options: a string with mount options or an empty string to use defaults + :raise: MountFilesystemError if mount fails + """ + device = self._get_device(device_name) + try: +- device.format.mount(mountpoint=mount_point) ++ device.format.mount(mountpoint=mount_point, options=options or None) + except FSError as e: + msg = "Failed to mount {} at {}: {}". format( + device_name, +diff --git a/pyanaconda/modules/storage/devicetree/handler_interface.py b/pyanaconda/modules/storage/devicetree/handler_interface.py +index 3839e17..2a16eb7 100644 +--- a/pyanaconda/modules/storage/devicetree/handler_interface.py ++++ b/pyanaconda/modules/storage/devicetree/handler_interface.py +@@ -46,14 +46,15 @@ class DeviceTreeHandlerInterface(InterfaceTemplate): + """ + self.implementation.teardown_device(device_name) + +- def MountDevice(self, device_name: Str, mount_point: Str): ++ def MountDevice(self, device_name: Str, mount_point: Str, options: Str): + """Mount a filesystem on the device. 
+ + :param device_name: a name of the device + :param mount_point: a path to the mount point ++ :param options: a string with mount options or an empty string to use defaults + :raise: MountFilesystemError if mount fails + """ +- self.implementation.mount_device(device_name, mount_point) ++ self.implementation.mount_device(device_name, mount_point, options) + + def UnmountDevice(self, device_name: Str, mount_point: Str): + """Unmount a filesystem on the device. +diff --git a/pyanaconda/payload/utils.py b/pyanaconda/payload/utils.py +index e0c7d6c..eb94f79 100644 +--- a/pyanaconda/payload/utils.py ++++ b/pyanaconda/payload/utils.py +@@ -71,7 +71,7 @@ def mount_device(device_name, mount_point): + :param str mount_point: a path to the mount point + """ + device_tree = STORAGE.get_proxy(DEVICE_TREE) +- device_tree.MountDevice(device_name, mount_point) ++ device_tree.MountDevice(device_name, mount_point, "ro") + + + def unmount_device(device_name, mount_point): +diff --git a/tests/nosetests/pyanaconda_tests/module_device_tree_test.py b/tests/nosetests/pyanaconda_tests/module_device_tree_test.py +index 33b06e8..838c70e 100644 +--- a/tests/nosetests/pyanaconda_tests/module_device_tree_test.py ++++ b/tests/nosetests/pyanaconda_tests/module_device_tree_test.py +@@ -582,12 +582,29 @@ class DeviceTreeInterfaceTestCase(unittest.TestCase): + self._add_device(StorageDevice("dev1", fmt=get_format("ext4"))) + + with tempfile.TemporaryDirectory() as d: +- self.interface.MountDevice("dev1", d) +- mount.assert_called_once_with(mountpoint=d) ++ self.interface.MountDevice("dev1", d, "") ++ mount.assert_called_once_with(mountpoint=d, options=None) + + mount.side_effect = FSError("Fake error.") + with self.assertRaises(MountFilesystemError) as cm: +- self.interface.MountDevice("dev1", "/path") ++ self.interface.MountDevice("dev1", "/path", "") ++ ++ self.assertEqual( ++ str(cm.exception), "Failed to mount dev1 at /path: Fake error." 
++ ) ++ ++ @patch.object(FS, "mount") ++ def mount_device_with_options_test(self, mount): ++ """Test MountDevice with options specified.""" ++ self._add_device(StorageDevice("dev1", fmt=get_format("ext4"))) ++ ++ with tempfile.TemporaryDirectory() as d: ++ self.interface.MountDevice("dev1", d, "ro,auto") ++ mount.assert_called_once_with(mountpoint=d, options="ro,auto") ++ ++ mount.side_effect = FSError("Fake error.") ++ with self.assertRaises(MountFilesystemError) as cm: ++ self.interface.MountDevice("dev1", "/path", "ro,auto") + + self.assertEqual( + str(cm.exception), "Failed to mount dev1 at /path: Fake error." +diff --git a/tests/nosetests/pyanaconda_tests/module_source_cdrom_test.py b/tests/nosetests/pyanaconda_tests/module_source_cdrom_test.py +index 386322d..4c964a7 100644 +--- a/tests/nosetests/pyanaconda_tests/module_source_cdrom_test.py ++++ b/tests/nosetests/pyanaconda_tests/module_source_cdrom_test.py +@@ -124,6 +124,8 @@ class CdromSourceTestCase(unittest.TestCase): + + class CdromSourceSetupTaskTestCase(unittest.TestCase): + ++ # TODO: To avoid so much patching it would be great to split tests to parts and test those ++ + mount_location = "/mnt/put-cdrom-here" + + def setup_install_source_task_name_test(self): +@@ -156,8 +158,15 @@ class CdromSourceSetupTaskTestCase(unittest.TestCase): + device_tree.FindOpticalMedia = Mock() + device_tree.FindOpticalMedia.return_value = [dev.name for dev in devices] + ++ def _find_device_by_name(name): ++ for dev in devices: ++ if dev.name == name: ++ return DeviceData.to_structure(dev) ++ ++ return None ++ + device_tree.GetDeviceData = Mock() +- device_tree.GetDeviceData.side_effect = [DeviceData.to_structure(dev) for dev in devices] ++ device_tree.GetDeviceData.side_effect = _find_device_by_name + + return device_tree + +@@ -172,42 +181,261 @@ class CdromSourceSetupTaskTestCase(unittest.TestCase): + This matches the logic in tested method. 
+ """ + for n in range(num_called): +- self.assertIn( +- call("test{}".format(n)), +- device_tree_mock.GetDeviceData.mock_calls +- ) +- self.assertIn( +- call("/dev/cdrom-test{}".format(n), self.mount_location, "iso9660", "ro"), +- mount_mock.mock_calls +- ) ++ self._check_if_device_was_tried(device_tree_mock, ++ mount_mock, ++ "test{}".format(n)) + + for n in range(num_called, num_called + num_untouched): +- self.assertNotIn( +- call("test{}".format(n)), +- device_tree_mock.GetDeviceData.mock_calls +- ) +- self.assertNotIn( +- call("/dev/cdrom-test{}".format(n), self.mount_location, "iso9660", "ro"), +- mount_mock.mock_calls +- ) ++ self._check_if_device_was_not_tried(device_tree_mock, ++ mount_mock, ++ "test{}".format(n)) + + self.assertEqual(device_tree_mock.GetDeviceData.call_count, num_called) + self.assertEqual(mount_mock.call_count, num_called) + ++ def _check_if_device_was_tried(self, ++ device_tree_mock, ++ mount_mock, ++ device_name): ++ self.assertIn( ++ call(device_name), ++ device_tree_mock.GetDeviceData.mock_calls ++ ) ++ ++ self.assertIn( ++ call("/dev/cdrom-{}".format(device_name), self.mount_location, "iso9660", "ro"), ++ mount_mock.mock_calls ++ ) ++ ++ def _check_if_device_was_not_tried(self, ++ device_tree_mock, ++ mount_mock, ++ device_name): ++ self.assertNotIn( ++ call(device_name), ++ device_tree_mock.GetDeviceData.mock_calls ++ ) ++ ++ self.assertNotIn( ++ call("/dev/cdrom-{}".format(device_name), self.mount_location, "iso9660", "ro"), ++ mount_mock.mock_calls ++ ) ++ ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.kernel_arguments") ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.is_valid_install_disk") ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.unmount") ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.mount") ++ @patch_dbus_get_proxy ++ def priority_stage2_cdrom_test(self, ++ proxy_getter, ++ mount_mock, ++ unmount_mock, ++ valid_mock, ++ 
kernel_arguments_mock): ++ """Test CD-ROM Source setup installation source task run - prioritize inst.stage2 CD-ROMs. ++ ++ Add valid stage2 CDROM device and it has to be tested first. ++ """ ++ kernel_arguments_mock.get.return_value = "hd:LABEL=my-cool-dvd" ++ device_tree = self.set_up_device_tree(2) ++ device_tree.ResolveDevice.return_value = "test1" ++ proxy_getter.return_value = device_tree ++ valid_mock.return_value = True ++ ++ task = SetUpCdromSourceTask(self.mount_location) ++ result = task.run() ++ ++ # Only one device was checked ++ device_tree.ResolveDevice.assert_called_once_with("LABEL=my-cool-dvd") ++ ++ self._check_if_device_was_tried(device_tree, mount_mock, "test1") ++ self._check_if_device_was_not_tried(device_tree, mount_mock, "test0") ++ ++ # First device (stage2 device) is valid one ++ valid_mock.assert_called_once() ++ ++ # First device works so no unmount is called here ++ unmount_mock.assert_not_called() ++ ++ # Test device name returned ++ self.assertEqual(result, "test1") ++ ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.kernel_arguments") ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.is_valid_install_disk") ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.unmount") ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.mount") ++ @patch_dbus_get_proxy ++ def priority_stage2_unrecognized_source_cdrom_test(self, ++ proxy_getter, ++ mount_mock, ++ unmount_mock, ++ valid_mock, ++ kernel_arguments_mock): ++ """Test CD-ROM Source setup installation source task run - unrecognized stage2 source. ++ ++ This should not happen but when we have the code there let's check it. ++ """ ++ kernel_arguments_mock.get.return_value = "wrong source!" 
++ device_tree = self.set_up_device_tree(1) ++ proxy_getter.return_value = device_tree ++ valid_mock.return_value = True ++ ++ task = SetUpCdromSourceTask(self.mount_location) ++ result = task.run() ++ ++ device_tree.ResolveDevice.assert_not_called() ++ ++ # 1/2 devices tried, 1/2 untried ++ self.assert_resolve_and_mount_calls(device_tree, mount_mock, 1, 1) ++ ++ # Only first was mounted ++ self.assertEqual(valid_mock.call_count, 1) ++ ++ # First device was used no unmount should be called ++ unmount_mock.assert_not_called() ++ ++ # Test device name returned ++ self.assertEqual(result, "test0") ++ ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.kernel_arguments") ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.is_valid_install_disk") ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.unmount") ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.mount") ++ @patch_dbus_get_proxy ++ def priority_stage2_not_hdd_source_cdrom_test(self, ++ proxy_getter, ++ mount_mock, ++ unmount_mock, ++ valid_mock, ++ kernel_arguments_mock): ++ """Test CD-ROM Source setup installation source task run - stage2 is not HDD source. ++ ++ We are testing HDD because DVD ISOs are created with inst.stage2=hd: . We want to change ++ this behavior on master so let's change this test too then. ++ ++ TODO: Change this test when DVD ISOs will use cdrom: instead of inst.stage2=hd:... 
++ """ ++ kernel_arguments_mock.get.return_value = "nfs:test.org:/super/cool/path" ++ device_tree = self.set_up_device_tree(1) ++ proxy_getter.return_value = device_tree ++ valid_mock.return_value = True ++ ++ task = SetUpCdromSourceTask(self.mount_location) ++ result = task.run() ++ ++ device_tree.ResolveDevice.assert_not_called() ++ ++ # 1/2 devices tried, 1/2 untried ++ self.assert_resolve_and_mount_calls(device_tree, mount_mock, 1, 1) ++ ++ # Only first was mounted ++ self.assertEqual(valid_mock.call_count, 1) ++ ++ # First device was used no unmount should be called ++ unmount_mock.assert_not_called() ++ ++ # Test device name returned ++ self.assertEqual(result, "test0") ++ ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.kernel_arguments") + @patch("pyanaconda.modules.payloads.source.cdrom.initialization.is_valid_install_disk") + @patch("pyanaconda.modules.payloads.source.cdrom.initialization.unmount") + @patch("pyanaconda.modules.payloads.source.cdrom.initialization.mount") + @patch_dbus_get_proxy +- def choose_from_multiple_cdroms_test(self, proxy_getter, mount_mock, unmount_mock, valid_mock): ++ def priority_stage2_cant_be_resolved_source_cdrom_test(self, ++ proxy_getter, ++ mount_mock, ++ unmount_mock, ++ valid_mock, ++ kernel_arguments_mock): ++ """Test CD-ROM Source setup installation source task run - can't resolve stage2 device. ++ ++ Stage2 device can't be resolved. This should not happen but let's make sure the code works. ++ """ ++ kernel_arguments_mock.get.return_value = "hd:LABEL=my-cool-dvd" ++ device_tree = self.set_up_device_tree(1) ++ proxy_getter.return_value = device_tree ++ # When device can't be resolved it returns an empty string. 
++ device_tree.ResolveDevice.return_value = "" ++ valid_mock.return_value = True ++ ++ task = SetUpCdromSourceTask(self.mount_location) ++ result = task.run() ++ ++ self._check_if_device_was_not_tried(device_tree, mount_mock, "") ++ ++ # 1/2 devices tried, 1/2 untried ++ self.assert_resolve_and_mount_calls(device_tree, mount_mock, 1, 1) ++ ++ # Only first was mounted ++ self.assertEqual(valid_mock.call_count, 1) ++ ++ # First device was used no unmount should be called ++ unmount_mock.assert_not_called() ++ ++ # Test device name returned ++ self.assertEqual(result, "test0") ++ ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.kernel_arguments") ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.is_valid_install_disk") ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.unmount") ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.mount") ++ @patch_dbus_get_proxy ++ def priority_stage2_not_optical_media_cdrom_test(self, ++ proxy_getter, ++ mount_mock, ++ unmount_mock, ++ valid_mock, ++ kernel_arguments_mock): ++ """Test CD-ROM Source setup installation source task run - stage2 is not optical media. ++ ++ We should not pick stage2 device if it is not an optical_media which means type iso9660. 
++ """ ++ kernel_arguments_mock.get.return_value = "hd:LABEL=correct-device" ++ device_tree = self.set_up_device_tree(1) ++ device_tree.ResolveDevice.return_value = "not-optical-media" ++ proxy_getter.return_value = device_tree ++ valid_mock.return_value = True ++ ++ task = SetUpCdromSourceTask(self.mount_location) ++ result = task.run() ++ ++ device_tree.ResolveDevice.assert_called_once_with("LABEL=correct-device") ++ ++ self._check_if_device_was_not_tried(device_tree, mount_mock, "correct-device") ++ ++ # 1/2 devices tried, 1/2 untried ++ self.assert_resolve_and_mount_calls(device_tree, mount_mock, 1, 1) ++ ++ # Only first was mounted ++ self.assertEqual(valid_mock.call_count, 1) ++ ++ # First device was used no unmount should be called ++ unmount_mock.assert_not_called() ++ ++ # Test device name returned ++ self.assertEqual(result, "test0") ++ ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.kernel_arguments") ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.is_valid_install_disk") ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.unmount") ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.mount") ++ @patch_dbus_get_proxy ++ def choose_from_multiple_cdroms_test(self, ++ proxy_getter, ++ mount_mock, ++ unmount_mock, ++ valid_mock, ++ kernel_arguments_mock): + """Test CD-ROM Source setup installation source task run - choice from multiple CD-ROMs. + + Fake four CD-ROM devices: First fails to mount, second has nothing useful, third has what + we want so is left mounted, fourth is entirely skipped. + The other two tests below are needed only to test the exit when nothing is found. 
+ """ ++ kernel_arguments_mock.get.return_value = None + device_tree = self.set_up_device_tree(4) + proxy_getter.return_value = device_tree +- + mount_mock.side_effect = \ + [PayloadSetupError("Mocked failure"), DEFAULT, DEFAULT, DEFAULT] + +@@ -231,18 +459,24 @@ class CdromSourceSetupTaskTestCase(unittest.TestCase): + # Test device name returned + self.assertEqual(result, "test2") + ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.kernel_arguments") + @patch("pyanaconda.modules.payloads.source.cdrom.initialization.is_valid_install_disk") + @patch("pyanaconda.modules.payloads.source.cdrom.initialization.unmount") + @patch("pyanaconda.modules.payloads.source.cdrom.initialization.mount") + @patch_dbus_get_proxy +- def failure_to_mount_test(self, proxy_getter, mount_mock, unmount_mock, valid_mock): ++ def failure_to_mount_test(self, ++ proxy_getter, ++ mount_mock, ++ unmount_mock, ++ valid_mock, ++ kernel_arguments_mock): + """Test CD-ROM Source setup installation source task run - mount failure. + + Mocks one disk which fails to mount, expect exception. 
+ """ ++ kernel_arguments_mock.get.return_value = None + device_tree = self.set_up_device_tree(1) + proxy_getter.return_value = device_tree +- + mount_mock.side_effect = PayloadSetupError("Mocked failure") + valid_mock.return_value = True + +@@ -258,18 +492,24 @@ class CdromSourceSetupTaskTestCase(unittest.TestCase): + # exception happened due to no disk + self.assertEqual(str(cm.exception), "Found no CD-ROM") + ++ @patch("pyanaconda.modules.payloads.source.cdrom.initialization.kernel_arguments") + @patch("pyanaconda.modules.payloads.source.cdrom.initialization.is_valid_install_disk") + @patch("pyanaconda.modules.payloads.source.cdrom.initialization.unmount") + @patch("pyanaconda.modules.payloads.source.cdrom.initialization.mount") + @patch_dbus_get_proxy +- def no_cdrom_with_valid_source_test(self, proxy_getter, mount_mock, unmount_mock, valid_mock): ++ def no_cdrom_with_valid_source_test(self, ++ proxy_getter, ++ mount_mock, ++ unmount_mock, ++ valid_mock, ++ kernel_arguments_mock): + """Test CD-ROM Source setup installation source task run - no valid source CD-ROMs. + + Mocks one CD-ROM device which has nothing useful, expect exception. 
+ """ ++ kernel_arguments_mock.get.return_value = None + device_tree = self.set_up_device_tree(1) + proxy_getter.return_value = device_tree +- + valid_mock.return_value = False + + with self.assertRaises(SourceSetupError) as cm: +-- +2.23.0 + diff --git a/anaconda.spec b/anaconda.spec new file mode 100644 index 0000000..ee5754b --- /dev/null +++ b/anaconda.spec @@ -0,0 +1,342 @@ +%define _empty_manifest_terminate_build 0 +%define _vendor openEuler +Name: anaconda +Version: 33.19 +Release: 36 +Summary: Graphical system installer +License: GPLv2+ and MIT +URL: http://fedoraproject.org/wiki/Anaconda +Source0: https://github.com/rhinstaller/anaconda/archive/%{name}-%{version}.tar.bz2 +Source1: {os_name}.conf + +Patch6000: Fix-hiding-of-network-device-activation-switch.patch + +Patch9000: add-passwd-policy.patch +Patch9001: fix-hostname-info.patch +Patch9002: disable-set-passwd-without-confirmation.patch +Patch9003: bugfix-logo-display-in-low-screen-resolution.patch +Patch9004: make-name-not-force-to-uppercase.patch +Patch9005: bugfix-GUI-nfs-unknown-error.patch +Patch9006: hide-help-button.patch +Patch9007: modify-interface-is-extended-in-Chinese-mode.patch +Patch9008: remove-vender-issue-in-netdev.patch +Patch9009: modify-arguments-parsing.patch +Patch9011: disable-product-name-in-welcome-is-uppercase.patch +Patch9012: modify-default-timezone.patch +Patch9013: modify-network-hostname-dot-illegal.patch +Patch9014: disable-ssh-login-checkbox.patch +Patch9015: bugfix-add-kdump-parameter-into-kernel-cmdline.patch +Patch9016: bugfix-fix-password-policy.patch +Patch9017: add-boot-args-for-smmu-and-video.patch +%if ! 
0%{?{os_name}} +Patch9018: disable-disk-encryption.patch +%endif +Patch9019: bugfix-set-up-LD_PRELOAD-for-the-Storage-and-Services-module.patch +Patch9020: bugfix-Propagate-a-lazy-proxy-of-the-storage-model.patch + +Patch6001: anaconda-Fix-stage2-as-default-sources.patch +Patch6002: anaconda-Allow-to-detect-devices-with-the-iso9660-file-system.patch +Patch6003: bugfix-do-not-test-if-repo-is-valid-based-on-treeinfo-file.patch +Patch6004: bugfix-move-verify-valid-installtree-to-source-module-utils.patch +Patch6005: bugfix-add-tests-for-verify-valid-installtree-function.patch +Patch6006: bugfix-rename-function-for-a-simple-check-for-DNF-repository.patch + +Patch9023: bugfix-add-dnf-transaction-timeout.patch + +Patch6007: fix-0-storage-devices-selected.patch +Patch6008: fix-remove-unknow-partition-is-sda-failed.patch +Patch6009: use-modinfo-to-check-ko-before-modprobe.patch +Patch6010: ntp-servers-improve-001-Create-a-new-DBus-structure-for-time-sources.patch +Patch6011: ntp-servers-improve-002-Use-the-structure-for-time-sources-in-ntp-py.patch +Patch6012: ntp-servers-improve-003-Use-the-structure-for-time-sources-in-the-Timezone-module.patch +Patch6013: ntp-servers-improve-004-Use-the-structure-for-time-sources-in-anaconda-py.patch +Patch6014: ntp-servers-improve-005-Use-the-structure-for-time-sources-in-network-py.patch +Patch6015: ntp-servers-improve-006-Add-support-for-the-NTP-server-status-cache.patch +Patch6016: ntp-servers-improve-007-Add-support-for-generating-a-summary-of-the-NTP-servers.patch +Patch6017: ntp-servers-improve-008-Use-the-structure-for-time-sources-in-TUI.patch +Patch6018: ntp-servers-improve-009-Use-the-structure-for-time-sources-in-GUI.patch +Patch6019: ntp-servers-improve-010-Add-support-for-the-timesource-kickstart-command.patch + +Patch9024: Change-length-limit-of-hostname-from-255-to-64.patch +Patch9025: Change-topbar-background-size.patch + +Patch6020: bugfix-Schedule-timed-actions-with-the-right-selector-18516.patch +Patch6021: 
bugfix-Reset-the-state-of-the-custom-partitioning-spoke.patch +Patch6022: bugfix-Fix-regression-reading-kernel-list-when-collecting-c.patch +Patch6023: bugfix-Fix-more-SElinux-contexts.patch +Patch6024: bugfix-Fix-issue-when-NFS-path-is-pointing-directly-to-ISO-.patch +Patch6025: bugfix-Create-the-initial-storage-model-during-the-initiali.patch +Patch6026: bugfix-Always-specify-the-boot-disk.patch +Patch6027: bugfix-Fix-passing-of-arguments-when-creating-dracut-argume.patch +Patch6028: bugfix-Reconfigure-DNF-payload-after-options-are-set.patch +Patch6029: bugfix-Only-pass-one-initrd-image-to-kexec.patch +Patch6030: bugfix-Fix-creating-cached-LVs-on-encrypted-PVs.patch +Patch6031: bugfix-Run-actions-of-the-Resize-dialog-in-the-reversed-ord.patch +Patch6032: bugfix-Reload-treeinfo-repositories-on-every-payload-reset.patch +Patch6033: bugfix-Remove-treeinfo-repositories-instead-of-disabling.patch +Patch6034: bugfix-Fix-crash-on-first-entering-of-source-spoke.patch +Patch6035: bugfix-Keep-treeinfo-repositories-disabled-after-payload-re.patch +Patch6036: bugfix-Fix-issue-that-treeinfo-repositories-were-never-disa.patch +Patch6037: bugfix-Fix-kickstart-file-error-with-user-groups.patch +Patch6038: bugfix-Create-ssh-user-using-only-existing-fields-1860058.patch +Patch6039: bugfix-Automatically-break-lines-in-labels-in-software-sele.patch +Patch6040: bugfix-Reset-the-RAID-level-of-the-device-request-1828092.patch +Patch6041: bugfix-Change-keyboard-ordering-to-US-layout-first-native-s.patch +Patch6042: bugfix-Handle-exceptions-from-threads-without-new-instances.patch +Patch6043: bugfix-network-fix-configuration-of-virtual-devices-by-boot.patch +Patch6044: bugfix-network-do-not-try-to-activate-connection-that-has-n.patch +Patch6045: bugfix-network-add-timeout-for-synchronous-activation-of-a-.patch +Patch6046: bugfix-Fix-traceback-when-removing-additional-repository.patch +Patch6047: bugfix-network-do-not-crash-when-updating-a-connection-with.patch +Patch6048: 
bugfix-Do-not-mount-as-RW-in-Dracut.patch +Patch6049: bugfix-The-underline-character-should-not-be-displayed.patch +Patch6050: bugfix-Recognize-systemd.unit-anaconda.target-in-anaconda-g.patch +Patch6051: bugfix-Always-clear-treeinfo-metadata-1872056.patch +Patch6052: bugfix-Apply-onboot-policy-even-when-network-was-configured.patch +Patch6053: bugfix-network-fix-parsing-of-hostname-from-ip-if-mac-is-de.patch +Patch6054: bugfix-Don-t-generate-container-data-for-non-container-devi.patch +Patch6055: bugfix-Differentiate-between-RAID-levels-of-a-device-and-it.patch +Patch6056: bugfix-Show-warning-message-when-entered-size-is-not-valid.patch +Patch6057: bugfix-Add-the-DBus-method-IsDeviceShrinkable-1875677.patch +Patch6058: bugfix-Check-if-original-partitions-are-mounted-too.patch +Patch6059: bugfix-network-get-hwadddr-when-binding-to-mac-more-robustl.patch +Patch6060: bugfix-Fix-the-combo-box-for-an-URL-type-of-additional-repo.patch +Patch6061: bugfix-Never-mount-partitions-on-a-disk-with-the-iso9660-fi.patch +Patch6062: bugfix-Add-missing-make-BuildRequires.patch +Patch6063: bugfix-Allow-to-format-selected-DASDs.patch +Patch6064: bugfix-Add-selinux-0-boot-parameter-when-SELinux-is-set-to-.patch +Patch6065: bugfix-Root-password-is-mandatory-if-there-is-not-admin-use.patch +Patch6066: bugfix-Fix-traceback-when-starting-installation-with-inst.c.patch +Patch6067: bugfix-Fix-checking-ssl-certificate-for-metadata-1745064.patch +Patch6068: bugfix-Fix-error-in-initrd-shift-count-out-of-range.patch +Patch6069: bugfix-Fix-the-logic-for-enabling-latest-updates.patch +Patch6070: bugfix-Don-t-enter-spokes-after-we-leave-the-Summary-hub.patch +Patch6071: bugfix-do-not-mount-dbus-source.patch +Patch6072: fix-xorg-timeout-and-throw-exception.patch +Patch6073: bugfix-Fix-issue-when-ns_info-cannot-be-retrieved-for-NVDim.patch +Patch6074: bugfix-Fix-SECTION-headers-in-docstrings.patch +Patch6075: delete-datezone-map.patch +Patch6076: change-inst-repo-default-value.patch + +Patch9026: 
support-use-sm3-crypt-user-password.patch + +Patch6077: bugfix-Cancel-planned-manual-update-of-system-time-on-turni.patch +Patch6078: revert-Set-default-entry-to-the-BLS-id-instead-of-th.patch + +Patch9027: bugfix-Solve-the-problem-that-the-circular-loading-progress-bar-does-not-rotate.patch + +Patch6079: backport-dracut-handle-compressed-kernel-modules.patch +Patch6080: backport-network-use-separate-main-conext-for-NM-client-in-threads.patch + +%define dbusver 1.2.3 +%define dnfver 3.6.0 +%define dracutver 034-7 +%define fcoeutilsver 1.0.12-3.20100323git +%define gettextver 0.19.8 +%define gtk3ver 3.22.17 +%define helpver 22.1-1 +%define isomd5sum 1.0.10 +%define langtablever 0.0.49 +%define libarchivever 3.0.4 +%define libblockdevver 2.1 +%define libxklavierver 5.4 +%define mehver 0.23-1 +%define nmver 1.0 +%define pykickstartver 3.27-1 +%define pypartedver 2.5-2 +%define rpmver 4.10.0 +%define simplelinever 1.1-1 +%define utillinuxver 2.15.1 +%define dasbusver 0.4 +BuildRequires: python3-pygments + +BuildRequires: audit-libs-devel libtool gettext-devel >= %{gettextver} gtk3-devel >= %{gtk3ver} +BuildRequires: gtk-doc gtk3-devel-docs >= %{gtk3ver} glib2-doc gobject-introspection-devel +BuildRequires: glade-devel libgnomekbd-devel libxklavier-devel >= %{libxklavierver} pango-devel +BuildRequires: python3-kickstart >= %{pykickstartver} python3-devel python3-nose systemd +BuildRequires: rpm-devel >= %{rpmver} libarchive-devel >= %{libarchivever} gdk-pixbuf2-devel +BuildRequires: libxml2 +BuildRequires: gsettings-desktop-schemas metacity + +Requires: anaconda-core = %{version}-%{release} +Requires: anaconda-tui = %{version}-%{release} +Requires: libblockdev-plugins-all >= %{libblockdevver} realmd isomd5sum >= %{isomd5sum} +Requires: kexec-tools createrepo_c tmux gdb rsync python3-meh-gui >= %{mehver} +Requires: adwaita-icon-theme python3-kickstart +Requires: tigervnc-server-minimal libxklavier >= %{libxklavierver} libgnomekbd +Requires: xz +Requires: 
nm-connection-editor keybinder3 anaconda-user-help >= %{helpver} yelp system-logos +Requires: python3 dracut >= %{dracutver} dracut-network dracut-live +%ifarch %{ix86} x86_64 +BuildRequires: desktop-file-utils +Requires: zenity fcoe-utils >= %{fcoeutilsver} +%endif + +Provides: anaconda-gui = %{version}-%{release} +Obsoletes: anaconda-gui < %{version}-%{release} + +Provides: anaconda-widgets = %{version}-%{release} +Obsoletes: anaconda-widgets < %{version}-%{release} + +Provides: anaconda-dracut = %{version}-%{release} +Obsoletes: anaconda-dracut < %{version}-%{release} + +Provides: anaconda-install-env-deps = %{version}-%{release} +Obsoletes: anaconda-install-env-deps < %{version}-%{release} + +%description +The anaconda package is a metapackage for the Anaconda installer. + +%package core +Summary: Core of the Anaconda installer +Requires: python3-libs python3-dnf >= %{dnfver} python3-blivet >= 1:3.2.2-1 +Requires: python3-blockdev >= %{libblockdevver} rpm-python3 >= %{rpmver} python3-productmd +Requires: libreport-anaconda >= 2.0.21-1 libselinux-python3 python3-meh >= %{mehver} +Requires: python3-pyparted >= %{pypartedver} python3-requests python3-requests-file +Requires: python3-requests-ftp python3-kickstart >= %{pykickstartver} +Requires: python3-langtable >= %{langtablever} util-linux >= %{utillinuxver} python3-gobject-base +Requires: python3-dbus python3-pwquality python3-systemd python3-dasbus >= %{dasbusver} +Requires: cracklib-dicts python3-pytz teamd NetworkManager >= %{nmver} NetworkManager-libnm >= %{nmver} +Requires: NetworkManager-team dhclient kbd chrony python3-ntplib systemd python3-pid +Requires: python3-ordered-set >= 2.0.0 glibc-langpack-en dbus-daemon +Requires: flatpak-libs +# required because of the rescue mode and VNC question +Requires: anaconda-tui = %{version}-%{release} +Provides: anaconda-images = %{version}-%{release} +Obsoletes: anaconda-images <= 10 +Provides: anaconda-runtime = %{version}-%{release} +Obsoletes: anaconda-runtime < 
%{version}-%{release} +Obsoletes: booty <= 0.107-1 + +# Ensure it's not possible for a version of grubby to be installed +# that doesn't work with btrfs subvolumes correctly... +Conflicts: grubby < 8.40-10 + +Requires: usermode + +%description core +The anaconda-core package contains the program which was used to install your +system. + +%package tui +Summary: Textual user interface for the Anaconda installer +Requires: anaconda-core = %{version}-%{release} python3-simpleline >= %{simplelinever} + +%description tui +This package contains textual user interface for the Anaconda installer. + + +%package devel +Summary: Development files for anaconda-widgets +Requires: glade +Requires: %{name}-widgets = %{version}-%{release} + +%description devel +This package contains libraries and header files needed for writing the anaconda +installer. It also contains Python and Glade support files, as well as +documentation for working with this library. + + +%prep +%autosetup -n %{name}-%{version} -p1 + +%build +# use actual build-time release number, not tarball creation time release number +%configure ANACONDA_RELEASE=%{release} +%make_build + +%install +%make_install +%delete_la + +# install {os_name} conf for anaconda +#if [ %{_vendor} != "{os_name}" ]; then +# sed -i "s#{os_name}#%{_vendor}#g" %{SOURCE1} +#fi +install -m 0755 %{SOURCE1} %{buildroot}/%{_sysconfdir}/%{name}/product.d/ + +# Create an empty directory for addons +install -d -m 0755 %{buildroot}%{_datadir}/anaconda/addons + +%ifarch %{ix86} x86_64 +desktop-file-install --dir=%{buildroot}%{_datadir}/applications %{buildroot}%{_datadir}/applications/liveinst.desktop +%endif + +# If no langs found, keep going +%find_lang %{name} || : + +%ldconfig_scriptlets + +%ifarch %{ix86} x86_64 +%post +update-desktop-database &> /dev/null || : + +%postun +update-desktop-database &> /dev/null || : +%endif + +%files +%defattr(-,root,root) +%license COPYING +%{_libdir}/libAnacondaWidgets.so.* 
+%{_libdir}/girepository*/AnacondaWidgets*typelib +%{python3_sitearch}/gi/overrides/* +%{python3_sitearch}/pyanaconda/ui/gui/* +%{_prefix}/libexec/anaconda/dd_* +%{_prefix}/lib/dracut/modules.d/80%{name}/* +%exclude %{python3_sitearch}/pyanaconda/ui/gui/spokes/blivet_gui.* + +%files core +%defattr(-,root,root) +%license COPYING +%{_sbindir}/anaconda +%{_sbindir}/handle-sshpw +%{_bindir}/instperf +%{_bindir}/analog +%{_bindir}/anaconda-cleanup +%{_bindir}/anaconda-disable-nm-ibft-plugin +%{_libdir}/libAnacondaWidgets.so +%{_prefix}/libexec/anaconda +%{_prefix}/lib/systemd/system-generators/* +%{_unitdir}/* +%{_datadir}/anaconda +%{_datadir}/locale/* +%{python3_sitearch}/pyanaconda +%exclude %{_prefix}/libexec/anaconda/dd_* +%exclude %{_libdir}/libAnacondaWidgets.so +%exclude %{_datadir}/gtk-doc +%exclude %{_datadir}/anaconda/ui/spokes/blivet_gui.* +%exclude %{_datadir}/glade/catalogs/AnacondaWidgets.xml +%exclude %{python3_sitearch}/pyanaconda/rescue.py* +%exclude %{python3_sitearch}/pyanaconda/__pycache__/rescue.* +%exclude %{python3_sitearch}/pyanaconda/ui/gui/* +%exclude %{python3_sitearch}/pyanaconda/ui/tui/* +%{_bindir}/analog +%{_bindir}/anaconda-cleanup +%dir %{_sysconfdir}/%{name} +%config %{_sysconfdir}/%{name}/* +%dir %{_sysconfdir}/%{name}/conf.d +%config %{_sysconfdir}/%{name}/conf.d/* +%dir %{_sysconfdir}/%{name}/product.d +%config %{_sysconfdir}/%{name}/product.d/* +%{_sbindir}/liveinst +%{_bindir}/liveinst +%{_libexecdir}/liveinst-setup.sh +%{_datadir}/applications/*.desktop +%{_sysconfdir}/xdg/autostart/*.desktop +%config(noreplace) %{_sysconfdir}/pam.d/* +%config(noreplace) %{_sysconfdir}/security/console.apps/* + +%files tui +%{python3_sitearch}/pyanaconda/rescue.py +%{python3_sitearch}/pyanaconda/__pycache__/rescue.* +%{python3_sitearch}/pyanaconda/ui/tui/* + +%files devel +%{_libdir}/libAnacondaWidgets.so +%{_libdir}/glade/modules/libAnacondaWidgets.so +%{_includedir}/* +%{_datadir}/glade/catalogs/AnacondaWidgets.xml +%{_datadir}/gtk-doc + 
+%changelog diff --git a/anaconda.yaml b/anaconda.yaml new file mode 100644 index 0000000..73f7b47 --- /dev/null +++ b/anaconda.yaml @@ -0,0 +1,4 @@ +version_control: github +src_repo: rhinstaller/anaconda +tag_prefix: anaconda- +seperator: . diff --git a/backport-dracut-handle-compressed-kernel-modules.patch b/backport-dracut-handle-compressed-kernel-modules.patch new file mode 100644 index 0000000..6f627e9 --- /dev/null +++ b/backport-dracut-handle-compressed-kernel-modules.patch @@ -0,0 +1,47 @@ +From c4a388d3956088c96631b72f0631db2a380127b0 Mon Sep 17 00:00:00 2001 +From: Mikhail Novosyolov +Date: Fri, 10 Jun 2022 22:03:43 +0300 +Subject: [PATCH] dracut: handle compressed kernel modules + +Compressed kernel modules could not be loaded. +Now both compressed and not compressed ones will be loaded. + +$ uname -r +5.10.74-generic-2rosa2021.1-x86_64 +$ ls -1v /lib/modules/$(uname -r)/kernel/drivers/scsi/device_handler/ +scsi_dh_alua.ko.zst +scsi_dh_emc.ko.zst +scsi_dh_hp_sw.ko.zst +scsi_dh_rdac.ko.zst + +Replaces https://github.com/rhinstaller/anaconda/pull/3501 +Noted by slava86@ +Reference:https://github.com/rhinstaller/anaconda/commit/c4a388d3956088c96631b72f0631db2a380127b0 +Conflict:NA +--- + dracut/anaconda-modprobe.sh | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +diff --git a/dracut/anaconda-modprobe.sh b/dracut/anaconda-modprobe.sh +index 97ee53bcb1..3640b4d42f 100755 +--- a/dracut/anaconda-modprobe.sh ++++ b/dracut/anaconda-modprobe.sh +@@ -14,11 +14,12 @@ MODULE_LIST="cramfs squashfs iscsi_tcp " + shopt -s nullglob + + SCSI_MODULES=/lib/modules/$KERNEL/kernel/drivers/scsi/device_handler/ +-for m in $SCSI_MODULES/*.ko; do ++for m in "$SCSI_MODULES"/*.ko*; do + # Shell spew to work around not having basename +- # Trim the paths off the prefix, then the . suffix +- a="${m##*/}" +- MODULE_LIST+=" ${a%.*}" ++ m="${m##*/}" ++ # Handle *.ko, *.ko.zst, *.ko.gz, *.ko.xz etc. 
++ IFS='.ko' read -r -a m <<< "$m" ++ MODULE_LIST+=" ${m[0]}" + done + + shopt -u nullglob +-- +2.23.0 diff --git a/backport-network-use-separate-main-conext-for-NM-client-in-threads.patch b/backport-network-use-separate-main-conext-for-NM-client-in-threads.patch new file mode 100644 index 0000000..fe854f6 --- /dev/null +++ b/backport-network-use-separate-main-conext-for-NM-client-in-threads.patch @@ -0,0 +1,922 @@ +From 3972b5dadcadd355d2ff25eae601bc35c336c45a Mon Sep 17 00:00:00 2001 +From: Radek Vykydal +Date: Thu, 29 Sep 2022 12:38:55 +0200 +Subject: [PATCH] network: use separate main conext for NM client in threads + +Resolves: rhbz#1931389 + +Create a special NM client with separate main context for calling NM +client from installation tasks which run in separate threads. + +Based on a pull request by t.feng who deserves +the biggest credit, and upated with suggestions by poncovka + +The created client should be used only in a limited scope as documented +in nm_client_in_thread docstring. If we want to extend it and address +potential issues with client instance releasing and reusing we'd need to +follow recommendations from Thomas Haller's kind reviews: + + + +first of all, initializing a NMClient instance takes relatively long, +because it makes D-Bus calls and the round trip time adds up. Btw, if +you'd pass +instance_flags=NM.ClientInstanceFlags.NO_AUTO_FETCH_PERMISSIONS it can +make it faster, see here. If it's too slow, then the solution would be +to re-use the nmclient instance or use async initialization and do stuff +in parallel. Both is more complicated however, so not necessary unless +we find that it's a problem. + +What is maybe more a problem is that each GMainContext consumes at least +one file descriptor. When you use the sync nm_client_new() method, then +NMClient has an additional internal GMainContext, so possibly there are +2 or more file descriptors involved. The way to "stop" NMClient is by +unrefing it. 
However, with all async operations in glib, they cannot +complete right away. That is because when NMClient gets unrefed, it will +cancel all (internally) pending operations, but even when you cancel a +glib operation, the callback still will be invoked with the cancellation +error. And callbacks only get invoked by iterating/running the +mainloop/maincontext. This means, if you have a short-running +application (e.g. not a GUI) and a reasonable small number of NMClient +instances, then you don't need to care. Otherwise, you unfortunately +need to make sure that the GMainContext is still iterated just long +enough, for all operations to be cancelled. That's slightly cumbersome, +and you can use nm_client_get_context_busy_watcher() to find that out. + +Btw, what you also cannot do, is having a NMClient instance alive and +just not iterating the GMainContext anymore. NMClient will subscribe to +D-Bus events, and those come (because GDBus has a separate worker +thread) and will be enqueued in the GMainContext. This applies to all +applications that register DBus signals via GDBus: you must iterate the +context enough, so that those events get eventually processed. I think +this does not apply to you here, but it would apply, if you try to keep +the nmclient instance alive and reuse later. 
+ + + +Reference:https://github.com/rhinstaller/anaconda/commit/3972b5dadcadd355d2ff25eae601bc35c336c45a +Conflict:NA +--- + pyanaconda/core/glib.py | 109 +++++++++++++++- + pyanaconda/modules/network/constants.py | 1 + + pyanaconda/modules/network/initialization.py | 127 +++++++++++-------- + pyanaconda/modules/network/installation.py | 21 +-- + pyanaconda/modules/network/network.py | 29 ++--- + pyanaconda/modules/network/nm_client.py | 113 +++++++++++------ + 6 files changed, 275 insertions(+), 125 deletions(-) + +diff --git a/pyanaconda/core/glib.py b/pyanaconda/core/glib.py +index 03c598d..3292538 100644 +--- a/pyanaconda/core/glib.py ++++ b/pyanaconda/core/glib.py +@@ -24,34 +24,42 @@ + + import gi + gi.require_version("GLib", "2.0") ++gi.require_version("Gio", "2.0") + + from gi.repository.GLib import markup_escape_text, format_size_full, \ + timeout_add_seconds, timeout_add, idle_add, \ + io_add_watch, child_watch_add, \ +- source_remove, \ ++ source_remove, timeout_source_new, \ + spawn_close_pid, spawn_async_with_pipes, \ + MainLoop, MainContext, \ + GError, Variant, VariantType, Bytes, \ + IOCondition, IOChannel, SpawnFlags, \ + MAXUINT ++from gi.repository.Gio import Cancellable ++ ++from pyanaconda.anaconda_loggers import get_module_logger ++log = get_module_logger(__name__) ++ + + __all__ = ["create_main_loop", "create_new_context", + "markup_escape_text", "format_size_full", + "timeout_add_seconds", "timeout_add", "idle_add", + "io_add_watch", "child_watch_add", +- "source_remove", ++ "source_remove", "timeout_source_new", + "spawn_close_pid", "spawn_async_with_pipes", + "GError", "Variant", "VariantType", "Bytes", + "IOCondition", "IOChannel", "SpawnFlags", +- "MAXUINT"] ++ "MAXUINT", "Cancellable"] + + +-def create_main_loop(): ++def create_main_loop(main_context=None): + """Create GLib main loop. + ++ :param main_context: main context to be used for the loop ++ :type main_context: GLib.MainContext + :returns: GLib.MainLoop instance. 
+ """ +- return MainLoop() ++ return MainLoop(main_context) + + + def create_new_context(): +@@ -59,3 +67,94 @@ def create_new_context(): + + :returns: GLib.MainContext.""" + return MainContext.new() ++ ++ ++class GLibCallResult(): ++ """Result of GLib async finish callback.""" ++ def __init__(self): ++ self.received_data = None ++ self.error_message = "" ++ self.timeout = False ++ ++ @property ++ def succeeded(self): ++ """The async call has succeeded.""" ++ return not self.failed ++ ++ @property ++ def failed(self): ++ """The async call has failed.""" ++ return bool(self.error_message) or self.timeout ++ ++ ++def sync_call_glib(context, async_call, async_call_finish, timeout, *call_args): ++ """Call GLib asynchronous method synchronously with timeout. ++ ++ :param context: context for the new loop in which the method will be called ++ :type context: GMainContext ++ :param async_call: asynchronous GLib method to be called ++ :type async_call: GLib method ++ :param async_call_finish: finish method of the asynchronous call ++ :type async_call_finish: GLib method ++ :param timeout: timeout for the loop in seconds (0 == no timeout) ++ :type timeout: int ++ ++ *call_args should hold all positional arguments preceding the cancellable argument ++ """ ++ ++ info = async_call.get_symbol() ++ result = GLibCallResult() ++ ++ loop = create_main_loop(context) ++ callbacks = [loop.quit] ++ ++ def _stop_loop(): ++ log.debug("sync_call_glib[%s]: quit", info) ++ while callbacks: ++ callback = callbacks.pop() ++ callback() ++ ++ def _cancellable_cb(): ++ log.debug("sync_call_glib[%s]: cancelled", info) ++ ++ cancellable = Cancellable() ++ cancellable_id = cancellable.connect(_cancellable_cb) ++ callbacks.append(lambda: cancellable.disconnect(cancellable_id)) ++ ++ def _timeout_cb(user_data): ++ log.debug("sync_call_glib[%s]: timeout", info) ++ result.timeout = True ++ cancellable.cancel() ++ return False ++ ++ timeout_source = timeout_source_new(int(timeout * 1000)) ++ 
timeout_source.set_callback(_timeout_cb) ++ timeout_source.attach(context) ++ callbacks.append(timeout_source.destroy) ++ ++ def _finish_cb(source_object, async_result): ++ log.debug("sync_call_glib[%s]: call %s", ++ info, ++ async_call_finish.get_symbol()) ++ try: ++ result.received_data = async_call_finish(async_result) ++ except Exception as e: # pylint: disable=broad-except ++ result.error_message = str(e) ++ finally: ++ _stop_loop() ++ ++ context.push_thread_default() ++ ++ log.debug("sync_call_glib[%s]: call", info) ++ try: ++ async_call( ++ *call_args, ++ cancellable=cancellable, ++ callback=_finish_cb ++ ) ++ loop.run() ++ finally: ++ _stop_loop() ++ context.pop_thread_default() ++ ++ return result +diff --git a/pyanaconda/modules/network/constants.py b/pyanaconda/modules/network/constants.py +index 530a8e2..e2759af 100644 +--- a/pyanaconda/modules/network/constants.py ++++ b/pyanaconda/modules/network/constants.py +@@ -25,6 +25,7 @@ from pyanaconda.core.constants import FIREWALL_DEFAULT, FIREWALL_DISABLED, \ + + NM_CONNECTION_UUID_LENGTH = 36 + CONNECTION_ACTIVATION_TIMEOUT = 45 ++CONNECTION_ADDING_TIMEOUT = 5 + + + @unique +diff --git a/pyanaconda/modules/network/initialization.py b/pyanaconda/modules/network/initialization.py +index b27a469..43a4a19 100644 +--- a/pyanaconda/modules/network/initialization.py ++++ b/pyanaconda/modules/network/initialization.py +@@ -23,7 +23,8 @@ from pyanaconda.modules.network.network_interface import NetworkInitializationTa + from pyanaconda.modules.network.nm_client import get_device_name_from_network_data, \ + ensure_active_connection_for_device, update_connection_from_ksdata, \ + add_connection_from_ksdata, bound_hwaddr_of_device, get_connections_available_for_iface, \ +- update_connection_values, commit_changes_with_autoconnection_blocked, is_ibft_connection ++ update_connection_values, commit_changes_with_autoconnection_blocked, is_ibft_connection, \ ++ nm_client_in_thread, activate_connection_sync + from 
pyanaconda.modules.network.ifcfg import get_ifcfg_file_of_device, find_ifcfg_uuid_of_device, \ + get_master_slaves_from_ifcfgs + from pyanaconda.modules.network.device_configuration import supported_wired_device_types, \ +@@ -40,11 +41,9 @@ from gi.repository import NM + class ApplyKickstartTask(Task): + """Task for application of kickstart network configuration.""" + +- def __init__(self, nm_client, network_data, supported_devices, bootif, ifname_option_values): ++ def __init__(self, network_data, supported_devices, bootif, ifname_option_values): + """Create a new task. + +- :param nm_client: NetworkManager client used as configuration backend +- :type nm_client: NM.Client + :param network_data: kickstart network data to be applied + :type: list(NetworkData) + :param supported_devices: list of names of supported network devices +@@ -55,7 +54,6 @@ class ApplyKickstartTask(Task): + :type ifname_option_values: list(str) + """ + super().__init__() +- self._nm_client = nm_client + self._network_data = network_data + self._supported_devices = supported_devices + self._bootif = bootif +@@ -76,13 +74,17 @@ class ApplyKickstartTask(Task): + :returns: names of devices to which kickstart was applied + :rtype: list(str) + """ ++ with nm_client_in_thread() as nm_client: ++ return self._run(nm_client) ++ ++ def _run(self, nm_client): + applied_devices = [] + + if not self._network_data: + log.debug("%s: No kickstart data.", self.name) + return applied_devices + +- if not self._nm_client: ++ if not nm_client: + log.debug("%s: No NetworkManager available.", self.name) + return applied_devices + +@@ -92,7 +94,7 @@ class ApplyKickstartTask(Task): + log.info("%s: Wireless devices configuration is not supported.", self.name) + continue + +- device_name = get_device_name_from_network_data(self._nm_client, ++ device_name = get_device_name_from_network_data(nm_client, + network_data, + self._supported_devices, + self._bootif) +@@ -100,10 +102,10 @@ class ApplyKickstartTask(Task): + 
log.warning("%s: --device %s not found", self.name, network_data.device) + continue + +- ifcfg_file = get_ifcfg_file_of_device(self._nm_client, device_name) ++ ifcfg_file = get_ifcfg_file_of_device(nm_client, device_name) + if ifcfg_file and ifcfg_file.is_from_kickstart: + if network_data.activate: +- if ensure_active_connection_for_device(self._nm_client, ifcfg_file.uuid, ++ if ensure_active_connection_for_device(nm_client, ifcfg_file.uuid, + device_name): + applied_devices.append(device_name) + continue +@@ -114,31 +116,39 @@ class ApplyKickstartTask(Task): + + connection = None + if ifcfg_file: +- connection = self._nm_client.get_connection_by_uuid(ifcfg_file.uuid) ++ connection = nm_client.get_connection_by_uuid(ifcfg_file.uuid) + if not connection: +- connection = self._find_initramfs_connection_of_iface(device_name) ++ connection = self._find_initramfs_connection_of_iface(nm_client, device_name) + + if connection: + # if the device was already configured in initramfs update the settings +- log.debug("%s: pre kickstart - updating connection %s of device %s", ++ log.debug("%s: updating connection %s of device %s", + self.name, connection.get_uuid(), device_name) +- update_connection_from_ksdata(self._nm_client, connection, network_data, +- device_name=device_name) ++ update_connection_from_ksdata( ++ nm_client, ++ connection, ++ network_data, ++ device_name=device_name) + if network_data.activate: +- device = self._nm_client.get_device_by_iface(device_name) +- self._nm_client.activate_connection_async(connection, device, None, None) +- log.debug("%s: pre kickstart - activating connection %s with device %s", +- self.name, connection.get_uuid(), device_name) ++ device = nm_client.get_device_by_iface(device_name) ++ nm_client.activate_connection_async(connection, device, None, None) ++ log.debug("%s: pre kickstart - activating connection", self.name) ++ log.debug("uuid is: %s", connection.get_uuid()) ++ log.debug("device_name is: %s", device_name) + else: +- 
log.debug("%s: pre kickstart - adding connection for %s", self.name, device_name) +- add_connection_from_ksdata(self._nm_client, network_data, device_name, +- activate=network_data.activate, +- ifname_option_values=self._ifname_option_values) ++ log.debug("%s: adding connection for %s", self.name, device_name) ++ add_connection_from_ksdata( ++ nm_client, ++ network_data, ++ device_name, ++ activate=network_data.activate, ++ ifname_option_values=self._ifname_option_values ++ ) + + return applied_devices + +- def _find_initramfs_connection_of_iface(self, iface): +- device = self._nm_client.get_device_by_iface(iface) ++ def _find_initramfs_connection_of_iface(self, nm_client, iface): ++ device = nm_client.get_device_by_iface(iface) + if device: + cons = device.get_available_connections() + for con in cons: +@@ -150,14 +160,11 @@ class ApplyKickstartTask(Task): + class ConsolidateInitramfsConnectionsTask(Task): + """Task for consolidation of initramfs connections.""" + +- def __init__(self, nm_client): ++ def __init__(self): + """Create a new task. 
+ +- :param nm_client: NetworkManager client used as configuration backend +- :type nm_client: NM.Client + """ + super().__init__() +- self._nm_client = nm_client + + @property + def name(self): +@@ -174,13 +181,17 @@ class ConsolidateInitramfsConnectionsTask(Task): + :returns: names of devices of which the connections have been consolidated + :rtype: list(str) + """ ++ with nm_client_in_thread() as nm_client: ++ return self._run(nm_client) ++ ++ def _run(self, nm_client): + consolidated_devices = [] + +- if not self._nm_client: ++ if not nm_client: + log.debug("%s: No NetworkManager available.", self.name) + return consolidated_devices + +- for device in self._nm_client.get_devices(): ++ for device in nm_client.get_devices(): + cons = device.get_available_connections() + number_of_connections = len(cons) + iface = device.get_iface() +@@ -200,7 +211,7 @@ class ConsolidateInitramfsConnectionsTask(Task): + self.name, number_of_connections, iface) + continue + +- ifcfg_file = get_ifcfg_file_of_device(self._nm_client, iface) ++ ifcfg_file = get_ifcfg_file_of_device(nm_client, iface) + if not ifcfg_file: + log.debug("%s: %d for %s - no ifcfg file found", + self.name, number_of_connections, iface) +@@ -222,7 +233,7 @@ class ConsolidateInitramfsConnectionsTask(Task): + self.name, number_of_connections, iface) + + ensure_active_connection_for_device( +- self._nm_client, ++ nm_client, + con_uuid, + iface, + only_replace=True +@@ -251,11 +262,9 @@ class ConsolidateInitramfsConnectionsTask(Task): + class SetRealOnbootValuesFromKickstartTask(Task): + """Task for setting of real ONBOOT values from kickstart.""" + +- def __init__(self, nm_client, network_data, supported_devices, bootif, ifname_option_values): ++ def __init__(self, network_data, supported_devices, bootif, ifname_option_values): + """Create a new task. 
+ +- :param nm_client: NetworkManager client used as configuration backend +- :type nm_client: NM.Client + :param network_data: kickstart network data to be applied + :type: list(NetworkData) + :param supported_devices: list of names of supported network devices +@@ -266,7 +275,6 @@ class SetRealOnbootValuesFromKickstartTask(Task): + :type ifname_option_values: list(str) + """ + super().__init__() +- self._nm_client = nm_client + self._network_data = network_data + self._supported_devices = supported_devices + self._bootif = bootif +@@ -287,9 +295,13 @@ class SetRealOnbootValuesFromKickstartTask(Task): + :return: names of devices for which ONBOOT was updated + :rtype: list(str) + """ ++ with nm_client_in_thread() as nm_client: ++ return self._run(nm_client) ++ ++ def _run(self, nm_client): + updated_devices = [] + +- if not self._nm_client: ++ if not nm_client: + log.debug("%s: No NetworkManager available.", self.name) + return updated_devices + +@@ -298,7 +310,7 @@ class SetRealOnbootValuesFromKickstartTask(Task): + return updated_devices + + for network_data in self._network_data: +- device_name = get_device_name_from_network_data(self._nm_client, ++ device_name = get_device_name_from_network_data(nm_client, + network_data, + self._supported_devices, + self._bootif) +@@ -318,7 +330,7 @@ class SetRealOnbootValuesFromKickstartTask(Task): + + cons_to_update = [] + for devname in devices_to_update: +- cons = get_connections_available_for_iface(self._nm_client, devname) ++ cons = get_connections_available_for_iface(nm_client, devname) + n_cons = len(cons) + con = None + if n_cons == 1: +@@ -326,8 +338,8 @@ class SetRealOnbootValuesFromKickstartTask(Task): + else: + log.debug("%s: %d connections found for %s", self.name, n_cons, devname) + if n_cons > 1: +- ifcfg_uuid = find_ifcfg_uuid_of_device(self._nm_client, devname) or "" +- con = self._nm_client.get_connection_by_uuid(ifcfg_uuid) ++ ifcfg_uuid = find_ifcfg_uuid_of_device(nm_client, devname) or "" ++ con = 
nm_client.get_connection_by_uuid(ifcfg_uuid) + if con: + cons_to_update.append((devname, con)) + +@@ -335,7 +347,7 @@ class SetRealOnbootValuesFromKickstartTask(Task): + if network_data.bondslaves or network_data.teamslaves or network_data.bridgeslaves: + # Master can be identified by devname or uuid, try to find master uuid + master_uuid = None +- device = self._nm_client.get_device_by_iface(master) ++ device = nm_client.get_device_by_iface(master) + if device: + cons = device.get_available_connections() + n_cons = len(cons) +@@ -344,9 +356,9 @@ class SetRealOnbootValuesFromKickstartTask(Task): + else: + log.debug("%s: %d connections found for %s", self.name, n_cons, master) + +- for name, con_uuid in get_master_slaves_from_ifcfgs(self._nm_client, ++ for name, con_uuid in get_master_slaves_from_ifcfgs(nm_client, + master, uuid=master_uuid): +- con = self._nm_client.get_connection_by_uuid(con_uuid) ++ con = nm_client.get_connection_by_uuid(con_uuid) + cons_to_update.append((name, con)) + + for devname, con in cons_to_update: +@@ -356,7 +368,7 @@ class SetRealOnbootValuesFromKickstartTask(Task): + con, + [("connection", NM.SETTING_CONNECTION_AUTOCONNECT, network_data.onboot)] + ) +- commit_changes_with_autoconnection_blocked(con) ++ commit_changes_with_autoconnection_blocked(con, nm_client) + updated_devices.append(devname) + + return updated_devices +@@ -365,18 +377,15 @@ class SetRealOnbootValuesFromKickstartTask(Task): + class DumpMissingIfcfgFilesTask(Task): + """Task for dumping of missing ifcfg files.""" + +- def __init__(self, nm_client, default_network_data, ifname_option_values): ++ def __init__(self, default_network_data, ifname_option_values): + """Create a new task. 
+ +- :param nm_client: NetworkManager client used as configuration backend +- :type nm_client: NM.Client + :param default_network_data: kickstart network data of default device configuration + :type default_network_data: NetworkData + :param ifname_option_values: list of ifname boot option values + :type ifname_option_values: list(str) + """ + super().__init__() +- self._nm_client = nm_client + self._default_network_data = default_network_data + self._ifname_option_values = ifname_option_values + +@@ -404,7 +413,7 @@ class DumpMissingIfcfgFilesTask(Task): + return con + return None + +- def _update_connection(self, con, iface): ++ def _update_connection(self, nm_client, con, iface): + log.debug("%s: updating id and binding (interface-name) of connection %s for %s", + self.name, con.get_uuid(), iface) + s_con = con.get_setting_connection() +@@ -414,7 +423,7 @@ class DumpMissingIfcfgFilesTask(Task): + if s_wired: + # By default connections are bound to interface name + s_wired.set_property(NM.SETTING_WIRED_MAC_ADDRESS, None) +- bound_mac = bound_hwaddr_of_device(self._nm_client, iface, self._ifname_option_values) ++ bound_mac = bound_hwaddr_of_device(nm_client, iface, self._ifname_option_values) + if bound_mac: + s_wired.set_property(NM.SETTING_WIRED_MAC_ADDRESS, bound_mac) + log.debug("%s: iface %s bound to mac address %s by ifname boot option", +@@ -427,19 +436,23 @@ class DumpMissingIfcfgFilesTask(Task): + :returns: names of devices for which ifcfg file was created + :rtype: list(str) + """ ++ with nm_client_in_thread() as nm_client: ++ return self._run(nm_client) ++ ++ def _run(self, nm_client): + new_ifcfgs = [] + +- if not self._nm_client: ++ if not nm_client: + log.debug("%s: No NetworkManager available.", self.name) + return new_ifcfgs + + dumped_device_types = supported_wired_device_types + virtual_device_types +- for device in self._nm_client.get_devices(): ++ for device in nm_client.get_devices(): + if device.get_device_type() not in dumped_device_types: + 
continue + + iface = device.get_iface() +- if get_ifcfg_file_of_device(self._nm_client, iface): ++ if get_ifcfg_file_of_device(nm_client, iface): + continue + + cons = device.get_available_connections() +@@ -479,7 +492,9 @@ class DumpMissingIfcfgFilesTask(Task): + continue + + if con: +- self._update_connection(con, iface) ++ self._update_connection(nm_client, con, iface) ++ # Update some values of connection generated in initramfs so it ++ # can be used as persistent configuration. + if has_initramfs_con: + update_connection_values( + con, +@@ -494,7 +509,7 @@ class DumpMissingIfcfgFilesTask(Task): + if has_initramfs_con: + network_data.onboot = True + add_connection_from_ksdata( +- self._nm_client, ++ nm_client, + network_data, + iface, + activate=False, +diff --git a/pyanaconda/modules/network/installation.py b/pyanaconda/modules/network/installation.py +index e923270..7ce57ee 100644 +--- a/pyanaconda/modules/network/installation.py ++++ b/pyanaconda/modules/network/installation.py +@@ -23,7 +23,7 @@ from pyanaconda.modules.common.errors.installation import NetworkInstallationErr + from pyanaconda.modules.common.task import Task + from pyanaconda.anaconda_loggers import get_module_logger + from pyanaconda.modules.network.nm_client import update_connection_values, \ +- commit_changes_with_autoconnection_blocked ++ commit_changes_with_autoconnection_blocked, nm_client_in_thread + from pyanaconda.modules.network.ifcfg import find_ifcfg_uuid_of_device + from pyanaconda.modules.network.utils import guard_by_system_configuration + +@@ -286,16 +286,13 @@ Name={} + class ConfigureActivationOnBootTask(Task): + """Task for configuration of automatic activation of devices on boot""" + +- def __init__(self, nm_client, onboot_ifaces): ++ def __init__(self, onboot_ifaces): + """Create a new task. 
+ +- :param nm_client: NetworkManager client used as configuration backend +- :type nm_client: NM.Client + :param onboot_ifaces: interfaces that should be autoactivated on boot + :type onboot_ifaces: list(str) + """ + super().__init__() +- self._nm_client = nm_client + self._onboot_ifaces = onboot_ifaces + + @property +@@ -304,18 +301,24 @@ class ConfigureActivationOnBootTask(Task): + + @guard_by_system_configuration(return_value=None) + def run(self): +- if not self._nm_client: ++ with nm_client_in_thread() as nm_client: ++ return self._run(nm_client) ++ ++ def _run(self, nm_client): ++ if not nm_client: + log.debug("%s: No NetworkManager available.", self.name) + return None + + for iface in self._onboot_ifaces: +- con_uuid = find_ifcfg_uuid_of_device(self._nm_client, iface) ++ con_uuid = find_ifcfg_uuid_of_device(nm_client, iface) + if con_uuid: +- con = self._nm_client.get_connection_by_uuid(con_uuid) ++ con = nm_client.get_connection_by_uuid(con_uuid) + update_connection_values( + con, + [("connection", NM.SETTING_CONNECTION_AUTOCONNECT, True)] + ) +- commit_changes_with_autoconnection_blocked(con) ++ commit_changes_with_autoconnection_blocked(con, nm_client) ++ log.debug("updated connection %s:\n%s", con.get_uuid(), ++ con.to_dbus(NM.ConnectionSerializationFlags.ALL)) + else: + log.warning("Configure ONBOOT: can't find ifcfg for %s", iface) +diff --git a/pyanaconda/modules/network/network.py b/pyanaconda/modules/network/network.py +index 36c7f48..7dba851 100644 +--- a/pyanaconda/modules/network/network.py ++++ b/pyanaconda/modules/network/network.py +@@ -34,7 +34,7 @@ from pyanaconda.modules.network.firewall import FirewallModule + from pyanaconda.modules.network.device_configuration import DeviceConfigurations, \ + supported_device_types, supported_wired_device_types + from pyanaconda.modules.network.nm_client import devices_ignore_ipv6, get_connections_dump, \ +- get_dracut_arguments_from_connection, is_ibft_connection ++ 
get_dracut_arguments_from_connection, is_ibft_connection, get_new_nm_client + from pyanaconda.modules.network.ifcfg import get_kickstart_network_data, \ + get_ifcfg_file, get_ifcfg_files_content + from pyanaconda.modules.network.installation import NetworkInstallationTask, \ +@@ -74,17 +74,12 @@ class NetworkService(KickstartService): + ) + + self.connected_changed = Signal() +- self.nm_client = None + # TODO fallback solution - use Gio/GNetworkMonitor ? +- if SystemBus.check_connection(): +- nm_client = NM.Client.new(None) +- if nm_client.get_nm_running(): +- self.nm_client = nm_client +- self.nm_client.connect("notify::%s" % NM.CLIENT_STATE, self._nm_state_changed) +- initial_state = self.nm_client.get_state() +- self.set_connected(self._nm_state_connected(initial_state)) +- else: +- log.debug("NetworkManager is not running.") ++ self.nm_client = get_new_nm_client() ++ if self.nm_client: ++ self.nm_client.connect("notify::%s" % NM.CLIENT_STATE, self._nm_state_changed) ++ initial_state = self.nm_client.get_state() ++ self.set_connected(self._nm_state_connected(initial_state)) + + self._original_network_data = [] + self._device_configurations = None +@@ -319,7 +314,6 @@ class NetworkService(KickstartService): + all_onboot_ifaces = list(set(onboot_ifaces + onboot_ifaces_by_policy)) + + task = ConfigureActivationOnBootTask( +- self.nm_client, + all_onboot_ifaces + ) + task.succeeded_signal.connect(lambda: self.log_task_result(task)) +@@ -451,7 +445,7 @@ class NetworkService(KickstartService): + + :returns: a task consolidating the connections + """ +- task = ConsolidateInitramfsConnectionsTask(self.nm_client) ++ task = ConsolidateInitramfsConnectionsTask() + task.succeeded_signal.connect(lambda: self.log_task_result(task, check_result=True)) + return task + +@@ -550,8 +544,7 @@ class NetworkService(KickstartService): + :returns: a task applying the kickstart + """ + supported_devices = [dev_info.device_name for dev_info in self.get_supported_devices()] +- task = 
ApplyKickstartTask(self.nm_client, +- self._original_network_data, ++ task = ApplyKickstartTask(self._original_network_data, + supported_devices, + self.bootif, + self.ifname_option_values) +@@ -571,8 +564,7 @@ class NetworkService(KickstartService): + :returns: a task setting the values + """ + supported_devices = [dev_info.device_name for dev_info in self.get_supported_devices()] +- task = SetRealOnbootValuesFromKickstartTask(self.nm_client, +- self._original_network_data, ++ task = SetRealOnbootValuesFromKickstartTask(self._original_network_data, + supported_devices, + self.bootif, + self.ifname_option_values) +@@ -600,8 +592,7 @@ class NetworkService(KickstartService): + """ + data = self.get_kickstart_handler() + default_network_data = data.NetworkData(onboot=False, ipv6="auto") +- task = DumpMissingIfcfgFilesTask(self.nm_client, +- default_network_data, ++ task = DumpMissingIfcfgFilesTask(default_network_data, + self.ifname_option_values) + task.succeeded_signal.connect(lambda: self.log_task_result(task, check_result=True)) + return task +diff --git a/pyanaconda/modules/network/nm_client.py b/pyanaconda/modules/network/nm_client.py +index 9e57db4..3128de5 100644 +--- a/pyanaconda/modules/network/nm_client.py ++++ b/pyanaconda/modules/network/nm_client.py +@@ -21,20 +21,67 @@ + import gi + gi.require_version("NM", "1.0") + from gi.repository import NM ++from contextlib import contextmanager + + import socket +-from queue import Queue, Empty + from pykickstart.constants import BIND_TO_MAC ++from pyanaconda.core.glib import create_new_context, GError, sync_call_glib + from pyanaconda.modules.network.constants import NM_CONNECTION_UUID_LENGTH, \ +- CONNECTION_ACTIVATION_TIMEOUT ++ CONNECTION_ACTIVATION_TIMEOUT, CONNECTION_ADDING_TIMEOUT + from pyanaconda.modules.network.kickstart import default_ks_vlan_interface_name + from pyanaconda.modules.network.utils import is_s390, get_s390_settings, netmask2prefix, \ + prefix2netmask ++from pyanaconda.core.dbus import 
SystemBus + + from pyanaconda.anaconda_loggers import get_module_logger + log = get_module_logger(__name__) + + ++@contextmanager ++def nm_client_in_thread(): ++ """Create NM Client with new GMainContext to be run in thread. ++ ++ Expected to be used only in installer environment for a few ++ one-shot isolated network configuration tasks. ++ Destroying of the created NM Client instance and release of ++ related resources is not implemented. ++ ++ For more information see NetworkManager example examples/python/gi/gmaincontext.py ++ """ ++ mainctx = create_new_context() ++ mainctx.push_thread_default() ++ ++ try: ++ yield get_new_nm_client() ++ finally: ++ mainctx.pop_thread_default() ++ ++ ++def get_new_nm_client(): ++ """Get new instance of NMClient. ++ ++ :returns: an instance of NetworkManager NMClient or None if system bus ++ is not available or NM is not running ++ :rtype: NM.NMClient ++ """ ++ if not SystemBus.check_connection(): ++ log.debug("get new NM Client failed: SystemBus connection check failed.") ++ return None ++ ++ try: ++ nm_client = NM.Client.new(None) ++ except GError as e: ++ log.debug("get new NM Client constructor failed: %s", e) ++ return None ++ ++ if not nm_client.get_nm_running(): ++ log.debug("get new NM Client failed: NetworkManager is not running.") ++ return None ++ ++ log.debug("get new NM Client succeeded.") ++ return nm_client ++ ++ + def get_iface_from_connection(nm_client, uuid): + """Get the name of device that would be used for the connection. 
+ +@@ -586,7 +633,7 @@ def update_connection_from_ksdata(nm_client, connection, network_data, device_na + + bind_connection(nm_client, connection, network_data.bindto, device_name) + +- commit_changes_with_autoconnection_blocked(connection) ++ commit_changes_with_autoconnection_blocked(connection, nm_client) + + log.debug("updated connection %s:\n%s", connection.get_uuid(), + connection.to_dbus(NM.ConnectionSerializationFlags.NO_SECRETS)) +@@ -872,8 +919,6 @@ def update_connection_values(connection, new_values): + else: + log.debug("setting '%s' not found while updating connection %s", + setting_name, connection.get_uuid()) +- log.debug("updated connection %s:\n%s", connection.get_uuid(), +- connection.to_dbus(NM.ConnectionSerializationFlags.ALL)) + + + def devices_ignore_ipv6(nm_client, device_types): +@@ -911,7 +956,7 @@ def get_connections_dump(nm_client): + return "\n".join(con_dumps) + + +-def commit_changes_with_autoconnection_blocked(connection, save_to_disk=True): ++def commit_changes_with_autoconnection_blocked(connection, nm_client, save_to_disk=True): + """Implementation of NM CommitChanges() method with blocked autoconnection. + + Update2() API is used to implement the functionality (called synchronously). 
+@@ -926,27 +971,28 @@ def commit_changes_with_autoconnection_blocked(connection, save_to_disk=True): + :return: on success result of the Update2() call, None of failure + :rtype: GVariant of type "a{sv}" or None + """ +- sync_queue = Queue() +- +- def finish_callback(connection, result, sync_queue): +- ret = connection.update2_finish(result) +- sync_queue.put(ret) +- + flags = NM.SettingsUpdate2Flags.BLOCK_AUTOCONNECT + if save_to_disk: + flags |= NM.SettingsUpdate2Flags.TO_DISK +- + con2 = NM.SimpleConnection.new_clone(connection) +- connection.update2( ++ ++ result = sync_call_glib( ++ nm_client.get_main_context(), ++ connection.update2, ++ connection.update2_finish, ++ CONNECTION_ADDING_TIMEOUT, + con2.to_dbus(NM.ConnectionSerializationFlags.ALL), + flags, +- None, +- None, +- finish_callback, +- sync_queue ++ None + ) + +- return sync_queue.get() ++ if result.failed: ++ log.error("comitting changes of connection %s failed: %s", ++ connection.get_uuid(), ++ result.error_message) ++ return None ++ ++ return result.received_data + + + def activate_connection_sync(nm_client, connection, device): +@@ -960,28 +1006,23 @@ def activate_connection_sync(nm_client, connection, device): + None if not needed + :type device: NM.Device + """ +- sync_queue = Queue() +- +- def finish_callback(nm_client, result, sync_queue): +- ret = nm_client.activate_connection_finish(result) +- sync_queue.put(ret) +- +- nm_client.activate_connection_async( ++ result = sync_call_glib( ++ nm_client.get_main_context(), ++ nm_client.activate_connection_async, ++ nm_client.activate_connection_finish, ++ CONNECTION_ACTIVATION_TIMEOUT, + connection, + device, +- None, +- None, +- finish_callback, +- sync_queue ++ None + ) + +- try: +- ret = sync_queue.get(timeout=CONNECTION_ACTIVATION_TIMEOUT) +- except Empty: +- log.error("Activation of a connection timed out.") +- ret = None ++ if result.failed: ++ log.error("Activation of a connection timed out, activate of connection %s failed: %s", ++ 
connection.get_uuid(), ++ result.error_message) ++ return None + +- return ret ++ return result.received_data + + + def get_dracut_arguments_from_connection(nm_client, connection, iface, target_ip, +-- +2.23.0 + diff --git a/bugfix-Add-missing-make-BuildRequires.patch b/bugfix-Add-missing-make-BuildRequires.patch new file mode 100644 index 0000000..189d507 --- /dev/null +++ b/bugfix-Add-missing-make-BuildRequires.patch @@ -0,0 +1,26 @@ +From f7398e8ceaa634bff73b1b1cd04ac0aa572d5249 Mon Sep 17 00:00:00 2001 +From: Martin Pitt +Date: Thu, 1 Oct 2020 15:44:43 +0200 +Subject: [PATCH] Add missing "make" BuildRequires + +The .spec calls make, and it's not present in the Fedora container +images. +--- + anaconda.spec.in | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/anaconda.spec.in b/anaconda.spec.in +index d2b34a65f..5c5146c9c 100644 +--- a/anaconda.spec.in ++++ b/anaconda.spec.in +@@ -54,6 +54,7 @@ BuildRequires: gobject-introspection-devel + BuildRequires: glade-devel + BuildRequires: libgnomekbd-devel + BuildRequires: libxklavier-devel >= %{libxklavierver} ++BuildRequires: make + BuildRequires: pango-devel + BuildRequires: python3-kickstart >= %{pykickstartver} + BuildRequires: python3-devel +-- +2.23.0 + diff --git a/bugfix-Add-selinux-0-boot-parameter-when-SELinux-is-set-to-.patch b/bugfix-Add-selinux-0-boot-parameter-when-SELinux-is-set-to-.patch new file mode 100644 index 0000000..97f264f --- /dev/null +++ b/bugfix-Add-selinux-0-boot-parameter-when-SELinux-is-set-to-.patch @@ -0,0 +1,63 @@ +From 8437fe761224a97967a076e05143304a225c3e05 Mon Sep 17 00:00:00 2001 +From: Ondrej Mosnacek +Date: Fri, 2 Oct 2020 13:06:26 +0200 +Subject: [PATCH] Add selinux=0 boot parameter when SELinux is set to disabled + (#1882464) + +We are trying to eliminate the reliance on disabling SELinux via +/etc/selinux/config in Fedora [1], since this functionality is being +deprecated upstream. 
+ +Even though only setting SELINUX=disabled in /etc/selinux/config will +still lead to a similar result as if SELinux would be disabled +completely, users might complain that Anaconda didn't actually do the +right thing, so let's make sure it is done properly by adding selinux=0 +to the target system's kernel command line when the user requests +SELinux to be disabled via anaconda command line or kickstart. + +[1] https://fedoraproject.org/wiki/Changes/Remove_Support_For_SELinux_Runtime_Disable + +Signed-off-by: Ondrej Mosnacek +--- + pyanaconda/modules/storage/bootloader/base.py | 10 +++++++++- + 1 file changed, 9 insertions(+), 1 deletion(-) + +diff --git a/pyanaconda/modules/storage/bootloader/base.py b/pyanaconda/modules/storage/bootloader/base.py +index d690ca056..12f8f54b2 100644 +--- a/pyanaconda/modules/storage/bootloader/base.py ++++ b/pyanaconda/modules/storage/bootloader/base.py +@@ -35,8 +35,9 @@ from pyanaconda.core.configuration.anaconda import conf + from pyanaconda.core.i18n import N_, _ + from pyanaconda.modules.common.constants.objects import FCOE, ISCSI, BOOTLOADER + from pyanaconda.modules.common.structures.iscsi import Node +-from pyanaconda.modules.common.constants.services import STORAGE, NETWORK ++from pyanaconda.modules.common.constants.services import STORAGE, NETWORK, SECURITY + from pyanaconda.modules.common.structures.network import NetworkDeviceInfo ++from pykickstart.constants import SELINUX_DISABLED + + log = get_module_logger(__name__) + +@@ -729,6 +730,7 @@ class BootLoader(object): + self._set_storage_boot_args(storage) + self._preserve_some_boot_args() + self._set_graphical_boot_args() ++ self._set_security_boot_args() + + def _set_extra_boot_args(self): + """Set the extra boot args.""" +@@ -885,6 +887,12 @@ class BootLoader(object): + self.boot_args.update(args) + self.dracut_args.update(args) + ++ def _set_security_boot_args(self): ++ """Set LSM-related boot args.""" ++ proxy = SECURITY.get_proxy() ++ if proxy.SELinux == 
SELINUX_DISABLED: ++ self.boot_args.add('selinux=0') ++ + # + # configuration + # +-- +2.23.0 + diff --git a/bugfix-Add-the-DBus-method-IsDeviceShrinkable-1875677.patch b/bugfix-Add-the-DBus-method-IsDeviceShrinkable-1875677.patch new file mode 100644 index 0000000..d8f6d22 --- /dev/null +++ b/bugfix-Add-the-DBus-method-IsDeviceShrinkable-1875677.patch @@ -0,0 +1,149 @@ +From cf8d3811b89b90211cac0cbd1e5ceb40ea7b641b Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Mon, 7 Sep 2020 17:09:15 +0200 +Subject: [PATCH] Add the DBus method IsDeviceShrinkable (#1875677) + +Replace the DBus method IsDeviceResizable with IsDeviceShrinkable and fix its +implementation. A shrinkable device has to be resizable and its minimal size +has to be lower then the current size. This should fix the issue with XFS, that +is resizable, but not shrinkable. + +Resolves: rhbz#1875677 +--- + .../automatic/resizable_interface.py | 6 ++-- + .../automatic/resizable_module.py | 6 ++-- + pyanaconda/ui/gui/spokes/lib/resize.py | 10 +++---- + .../pyanaconda_tests/module_resizable_test.py | 29 +++++++++++++++---- + 4 files changed, 34 insertions(+), 17 deletions(-) + +diff --git a/pyanaconda/modules/storage/partitioning/automatic/resizable_interface.py b/pyanaconda/modules/storage/partitioning/automatic/resizable_interface.py +index 760a49ecb..c531a0b42 100644 +--- a/pyanaconda/modules/storage/partitioning/automatic/resizable_interface.py ++++ b/pyanaconda/modules/storage/partitioning/automatic/resizable_interface.py +@@ -37,13 +37,13 @@ class ResizableDeviceTreeInterface(DeviceTreeInterface): + """ + return self.implementation.is_device_partitioned(device_name) + +- def IsDeviceResizable(self, device_name: Str) -> Bool: +- """Is the specified device resizable? ++ def IsDeviceShrinkable(self, device_name: Str) -> Bool: ++ """Is the specified device shrinkable? 
+ + :param device_name: a name of the device + :return: True or False + """ +- return self.implementation.is_device_resizable(device_name) ++ return self.implementation.is_device_shrinkable(device_name) + + def GetDevicePartitions(self, device_name: Str) -> List[Str]: + """Get partitions of the specified device. +diff --git a/pyanaconda/modules/storage/partitioning/automatic/resizable_module.py b/pyanaconda/modules/storage/partitioning/automatic/resizable_module.py +index 9603dfc1b..12d32e891 100644 +--- a/pyanaconda/modules/storage/partitioning/automatic/resizable_module.py ++++ b/pyanaconda/modules/storage/partitioning/automatic/resizable_module.py +@@ -52,14 +52,14 @@ class ResizableDeviceTreeModule(DeviceTreeModule): + """Is the specified device partitioned?""" + return device.is_disk and device.partitioned and device.format.supported + +- def is_device_resizable(self, device_name): +- """Is the specified device resizable? ++ def is_device_shrinkable(self, device_name): ++ """Is the specified device shrinkable? + + :param device_name: a name of the device + :return: True or False + """ + device = self._get_device(device_name) +- return device.resizable ++ return device.resizable and device.min_size < device.size + + def get_device_partitions(self, device_name): + """Get partitions of the specified device. +diff --git a/pyanaconda/ui/gui/spokes/lib/resize.py b/pyanaconda/ui/gui/spokes/lib/resize.py +index 4695e5332..ee165ada7 100644 +--- a/pyanaconda/ui/gui/spokes/lib/resize.py ++++ b/pyanaconda/ui/gui/spokes/lib/resize.py +@@ -228,13 +228,13 @@ class ResizeDialog(GUIObject): + + # Calculate the free size. + # Devices that are not resizable are still deletable. 
+- is_resizable = self._device_tree.IsDeviceResizable(device_name) ++ is_shrinkable = self._device_tree.IsDeviceShrinkable(device_name) + size_limits = self._device_tree.GetDeviceSizeLimits(device_name) + + min_size = Size(size_limits[0]) + device_size = Size(device_data.size) + +- if is_resizable: ++ if is_shrinkable: + free_size = device_size - min_size + resize_string = _("%(freeSize)s of %(devSize)s") % { + "freeSize": free_size.human_readable(max_places=1), +@@ -394,10 +394,10 @@ class ResizeDialog(GUIObject): + + # If the selected filesystem does not support shrinking, make that + # button insensitive. +- is_resizable = self._device_tree.IsDeviceResizable(device_name) +- self._shrink_button.set_sensitive(is_resizable) ++ is_shrinkable = self._device_tree.IsDeviceShrinkable(device_name) ++ self._shrink_button.set_sensitive(is_shrinkable) + +- if is_resizable: ++ if is_shrinkable: + min_size = self._device_tree.GetDeviceSizeLimits(device_name)[0] + self._setup_slider(min_size, device_data.size, Size(obj.target)) + +diff --git a/tests/nosetests/pyanaconda_tests/module_resizable_test.py b/tests/nosetests/pyanaconda_tests/module_resizable_test.py +index 3c60e166b..42880b4ca 100644 +--- a/tests/nosetests/pyanaconda_tests/module_resizable_test.py ++++ b/tests/nosetests/pyanaconda_tests/module_resizable_test.py +@@ -18,9 +18,11 @@ + # Red Hat Author(s): Vendula Poncova + # + import unittest ++from unittest.mock import patch + + from blivet.devices import StorageDevice, DiskDevice, PartitionDevice + from blivet.formats import get_format ++from blivet.formats.fs import FS + from blivet.size import Size + + from pyanaconda.modules.storage.partitioning.automatic.resizable_interface import \ +@@ -66,13 +68,28 @@ class ResizableDeviceTreeTestCase(unittest.TestCase): + self.assertEqual(self.interface.IsDevicePartitioned("dev1"), False) + self.assertEqual(self.interface.IsDevicePartitioned("dev2"), True) + +- def is_device_resizable_test(self): +- """Test 
IsDeviceResizable.""" ++ @patch.object(FS, "update_size_info") ++ def is_device_shrinkable_test(self, update_size_info): ++ """Test IsDeviceShrinkable.""" + self.module.on_storage_changed(create_storage()) +- self._add_device(StorageDevice( +- "dev1" +- )) +- self.assertEqual(self.interface.IsDeviceResizable("dev1"), False) ++ ++ dev1 = StorageDevice( ++ "dev1", ++ exists=True, ++ size=Size("10 GiB"), ++ fmt=get_format(None, exists=True) ++ ) ++ ++ self._add_device(dev1) ++ self.assertEqual(self.interface.IsDeviceShrinkable("dev1"), False) ++ ++ dev1._resizable = True ++ dev1.format._resizable = True ++ dev1.format._min_size = Size("1 GiB") ++ self.assertEqual(self.interface.IsDeviceShrinkable("dev1"), True) ++ ++ dev1.format._min_size = Size("10 GiB") ++ self.assertEqual(self.interface.IsDeviceShrinkable("dev1"), False) + + def get_device_partitions_test(self): + """Test GetDevicePartitions.""" +-- +2.23.0 + diff --git a/bugfix-Allow-to-format-selected-DASDs.patch b/bugfix-Allow-to-format-selected-DASDs.patch new file mode 100644 index 0000000..3b5e851 --- /dev/null +++ b/bugfix-Allow-to-format-selected-DASDs.patch @@ -0,0 +1,32 @@ +From d1d43dc872aa05b7273883fe42debd55e11e6df6 Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Mon, 5 Oct 2020 18:57:24 +0200 +Subject: [PATCH] Allow to format selected DASDs + +TUI should allow to format selected DASDs the same way as GUI. + +(cherry-picked from a commit 4f1bc77) + +Related: rhbz#1874394 +--- + pyanaconda/ui/tui/spokes/storage.py | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/pyanaconda/ui/tui/spokes/storage.py b/pyanaconda/ui/tui/spokes/storage.py +index 813dbb052..4ed97d27e 100644 +--- a/pyanaconda/ui/tui/spokes/storage.py ++++ b/pyanaconda/ui/tui/spokes/storage.py +@@ -258,6 +258,10 @@ class StorageSpoke(NormalTUISpoke): + # Wait for storage. + threadMgr.wait(THREAD_STORAGE) + ++ # Allow to format DASDs. 
++ self._disk_init_module.SetFormatUnrecognizedEnabled(True) ++ self._disk_init_module.SetFormatLDLEnabled(True) ++ + # Get selected disks. + disks = filter_disks_by_names(self._available_disks, self._selected_disks) + +-- +2.23.0 + diff --git a/bugfix-Always-clear-treeinfo-metadata-1872056.patch b/bugfix-Always-clear-treeinfo-metadata-1872056.patch new file mode 100644 index 0000000..8358be9 --- /dev/null +++ b/bugfix-Always-clear-treeinfo-metadata-1872056.patch @@ -0,0 +1,31 @@ +From 9ef262fbd07508a5dd9becb30a0136fded45e792 Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Thu, 3 Sep 2020 14:53:18 +0200 +Subject: [PATCH] Always clear treeinfo metadata (#1872056) + +Metadata from the treeinfo were loaded only during the load of new metadata. +However, this does not work if we have source without metadata (e.g. +mirrorlist). In that case we are loading additional repositories from the old +metadata (not mounted anymore) and not the new ones which may have unexpected +results. + +Resolves: rhbz#1872056 +--- + pyanaconda/payload/dnf/payload.py | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/pyanaconda/payload/dnf/payload.py b/pyanaconda/payload/dnf/payload.py +index 5fba7e0e7..880886685 100644 +--- a/pyanaconda/payload/dnf/payload.py ++++ b/pyanaconda/payload/dnf/payload.py +@@ -1477,6 +1477,7 @@ class DNFPayload(Payload): + def reset(self): + tear_down_sources(self.proxy) + self.reset_additional_repos() ++ self._install_tree_metadata = None + + shutil.rmtree(DNF_CACHE_DIR, ignore_errors=True) + shutil.rmtree(DNF_PLUGINCONF_DIR, ignore_errors=True) +-- +2.23.0 + diff --git a/bugfix-Always-specify-the-boot-disk.patch b/bugfix-Always-specify-the-boot-disk.patch new file mode 100644 index 0000000..125e02f --- /dev/null +++ b/bugfix-Always-specify-the-boot-disk.patch @@ -0,0 +1,43 @@ +From 6326cb3e866027a5862c0fbd0a1f0a2a86b6836b Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Tue, 23 Jun 2020 17:55:37 +0200 +Subject: [PATCH] Always specify the boot disk 
+ +We should always specify the boot disk when we allocate partitions. Otherwise, +Blivet will choose one of the available disks that don't have to be valid. + +(cherry-picked from a commit 856e011) +--- + .../storage/partitioning/automatic/automatic_partitioning.py | 2 +- + .../modules/storage/partitioning/custom/custom_partitioning.py | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/pyanaconda/modules/storage/partitioning/automatic/automatic_partitioning.py b/pyanaconda/modules/storage/partitioning/automatic/automatic_partitioning.py +index acceb4b4e..a88c55d4d 100644 +--- a/pyanaconda/modules/storage/partitioning/automatic/automatic_partitioning.py ++++ b/pyanaconda/modules/storage/partitioning/automatic/automatic_partitioning.py +@@ -175,7 +175,7 @@ class AutomaticPartitioningTask(NonInteractivePartitioningTask): + devs = schedule_partitions(storage, disks, devs, scheme, requests, encrypted, luks_fmt_args) + + # run the autopart function to allocate and grow partitions +- do_partitioning(storage) ++ do_partitioning(storage, boot_disk=storage.bootloader.stage1_disk) + schedule_volumes(storage, devs, scheme, requests, encrypted) + + # grow LVs +diff --git a/pyanaconda/modules/storage/partitioning/custom/custom_partitioning.py b/pyanaconda/modules/storage/partitioning/custom/custom_partitioning.py +index 218bbe13f..754a48e2e 100644 +--- a/pyanaconda/modules/storage/partitioning/custom/custom_partitioning.py ++++ b/pyanaconda/modules/storage/partitioning/custom/custom_partitioning.py +@@ -136,7 +136,7 @@ class CustomPartitioningTask(NonInteractivePartitioningTask): + self._execute_partition_data(storage, data, partition_data) + + if data.partition.partitions: +- do_partitioning(storage) ++ do_partitioning(storage, boot_disk=storage.bootloader.stage1_disk) + + def _execute_partition_data(self, storage, data, partition_data): + """Execute the partition data. 
+-- +2.23.0 + diff --git a/bugfix-Apply-onboot-policy-even-when-network-was-configured.patch b/bugfix-Apply-onboot-policy-even-when-network-was-configured.patch new file mode 100644 index 0000000..dfc1407 --- /dev/null +++ b/bugfix-Apply-onboot-policy-even-when-network-was-configured.patch @@ -0,0 +1,27 @@ +From b7258aafb1e55b055bc6bcd18b10c83f5a5feec6 Mon Sep 17 00:00:00 2001 +From: Radek Vykydal +Date: Wed, 29 Jul 2020 12:43:26 +0200 +Subject: [PATCH] Apply onboot policy even when network was configured in UI. + +Resolves: rhbz#1856632 +--- + pyanaconda/modules/network/network.py | 3 --- + 1 file changed, 3 deletions(-) + +diff --git a/pyanaconda/modules/network/network.py b/pyanaconda/modules/network/network.py +index aa5a14b38..507d3b5c1 100644 +--- a/pyanaconda/modules/network/network.py ++++ b/pyanaconda/modules/network/network.py +@@ -372,9 +372,6 @@ class NetworkService(KickstartService): + # Not if any network device was configured via kickstart. + if self._original_network_data: + return False +- # Not if any network device was configured in UI. 
+- if self._use_device_configurations: +- return False + # Not if there is no configuration to apply the policy to + if not self._device_configurations or not self._device_configurations.get_all(): + return False +-- +2.23.0 + diff --git a/bugfix-Automatically-break-lines-in-labels-in-software-sele.patch b/bugfix-Automatically-break-lines-in-labels-in-software-sele.patch new file mode 100644 index 0000000..896f7a8 --- /dev/null +++ b/bugfix-Automatically-break-lines-in-labels-in-software-sele.patch @@ -0,0 +1,53 @@ +From 6c8bfa8649e71d4f20eea69b57dc47b514dd498c Mon Sep 17 00:00:00 2001 +From: Vladimir Slavik +Date: Tue, 11 Aug 2020 17:34:49 +0200 +Subject: [PATCH] Automatically break lines in labels in software selection + spoke + +Resolves: rhbz#1822787 +--- + pyanaconda/ui/gui/spokes/software_selection.glade | 10 +++++++--- + 1 file changed, 7 insertions(+), 3 deletions(-) + +diff --git a/pyanaconda/ui/gui/spokes/software_selection.glade b/pyanaconda/ui/gui/spokes/software_selection.glade +index 2965e66df..87804c72e 100644 +--- a/pyanaconda/ui/gui/spokes/software_selection.glade ++++ b/pyanaconda/ui/gui/spokes/software_selection.glade +@@ -1,5 +1,5 @@ + +- ++ + + + +@@ -60,10 +60,12 @@ + + True + False ++ end + 6 + True +- 0 + Base Environment ++ True ++ 0 + + + +@@ -78,10 +80,12 @@ + + True + False ++ end + 6 + True +- 0 + Additional software for Selected Environment ++ True ++ 0 + + + +-- +2.23.0 + diff --git a/bugfix-Cancel-planned-manual-update-of-system-time-on-turni.patch b/bugfix-Cancel-planned-manual-update-of-system-time-on-turni.patch new file mode 100644 index 0000000..c581961 --- /dev/null +++ b/bugfix-Cancel-planned-manual-update-of-system-time-on-turni.patch @@ -0,0 +1,40 @@ +From c21fd65251a0154e2816f1505910b9c99f0661be Mon Sep 17 00:00:00 2001 +From: Radek Vykydal +Date: Tue, 9 Nov 2021 09:33:45 +0100 +Subject: [PATCH] Cancel planned manual update of system time on turning ntp on + +The PR is based on a PR opened by bitcoffee who spotted the 
issue: +"If we do not cancel the timer, the time may be incorrectly changed by +the timer after the NTP service updates the time" +--- + pyanaconda/ui/gui/spokes/datetime_spoke.py | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +diff --git a/pyanaconda/ui/gui/spokes/datetime_spoke.py b/pyanaconda/ui/gui/spokes/datetime_spoke.py +index 09deabb..8ebc5bb 100644 +--- a/pyanaconda/ui/gui/spokes/datetime_spoke.py ++++ b/pyanaconda/ui/gui/spokes/datetime_spoke.py +@@ -1059,8 +1059,22 @@ class DatetimeSpoke(FirstbootSpokeMixIn, NormalSpoke): + def _show_no_ntp_server_warning(self): + self.set_warning(_("You have no working NTP server configured")) + ++ def _cancel_planned_update(self): ++ """Cancel system time update planned by manual setting""" ++ # cancel system time update ++ if self._start_updating_timer: ++ self._start_updating_timer.cancel() ++ self._start_updating_timer = None ++ # re-enable UI update because it will not be done by the ++ # system time update we've just cancelled ++ if not self._update_datetime_timer: ++ self._update_datetime_timer = Timer() ++ self._update_datetime_timer.timeout_sec(1, self._update_datetime) ++ + def on_ntp_switched(self, switch, *args): + if switch.get_active(): ++ self._cancel_planned_update() ++ + #turned ON + if not conf.system.can_set_time_synchronization: + #cannot touch runtime system, not much to do here + diff --git a/bugfix-Change-keyboard-ordering-to-US-layout-first-native-s.patch b/bugfix-Change-keyboard-ordering-to-US-layout-first-native-s.patch new file mode 100644 index 0000000..9322582 --- /dev/null +++ b/bugfix-Change-keyboard-ordering-to-US-layout-first-native-s.patch @@ -0,0 +1,30 @@ +From e05cc18294e67099bf87076e4f23fe5f031fecb5 Mon Sep 17 00:00:00 2001 +From: Sundeep Anand +Date: Wed, 12 Aug 2020 17:56:19 +0530 +Subject: [PATCH] Change keyboard ordering to US layout first, 'native' second. 
+ Resolves: rhbz#1039185 + +--- + pyanaconda/keyboard.py | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/pyanaconda/keyboard.py b/pyanaconda/keyboard.py +index 2c3226c6e..081fc96fd 100644 +--- a/pyanaconda/keyboard.py ++++ b/pyanaconda/keyboard.py +@@ -171,8 +171,10 @@ def set_x_keyboard_defaults(localization_proxy, xkl_wrapper): + # store it normalized + new_layouts = [normalize_layout_variant(layouts[0])] + if not langtable.supports_ascii(layouts[0]): +- # does not support typing ASCII chars, append the default layout +- new_layouts.append(DEFAULT_KEYBOARD) ++ # The default keymap setting should have "us" before the native layout ++ # which does not support ascii, ++ # refer: https://bugzilla.redhat.com/show_bug.cgi?id=1039185 ++ new_layouts.insert(0, DEFAULT_KEYBOARD) + else: + log.error("Failed to get layout for chosen locale '%s'", locale) + new_layouts = [DEFAULT_KEYBOARD] +-- +2.23.0 + diff --git a/bugfix-Check-if-original-partitions-are-mounted-too.patch b/bugfix-Check-if-original-partitions-are-mounted-too.patch new file mode 100644 index 0000000..c021bd1 --- /dev/null +++ b/bugfix-Check-if-original-partitions-are-mounted-too.patch @@ -0,0 +1,66 @@ +From 6515d0779a41c1ea902ada86e4e911821cded92e Mon Sep 17 00:00:00 2001 +From: Vladimir Slavik +Date: Thu, 3 Sep 2020 19:27:28 +0200 +Subject: [PATCH] Check if original partitions are mounted, too + +Resolves: rhbz#1751698 +--- + pyanaconda/modules/storage/checker/utils.py | 30 +++++++++++++++------ + 1 file changed, 22 insertions(+), 8 deletions(-) + +diff --git a/pyanaconda/modules/storage/checker/utils.py b/pyanaconda/modules/storage/checker/utils.py +index ff3ee3a6b..c40aa1dc3 100644 +--- a/pyanaconda/modules/storage/checker/utils.py ++++ b/pyanaconda/modules/storage/checker/utils.py +@@ -417,11 +417,15 @@ def verify_luks2_memory_requirements(storage, constraints, report_error, report_ + def verify_mounted_partitions(storage, constraints, report_error, report_warning): + """ 
Check the selected disks to make sure all their partitions are unmounted. + ++ Check both the currently known and original partitions. ++ + :param storage: a storage to check + :param constraints: a dictionary of constraints + :param report_error: a function for error reporting + :param report_warning: a function for warning reporting + """ ++ partitions_to_check = {} ++ + for disk in storage.disks: + if disk.protected: + continue +@@ -430,14 +434,24 @@ def verify_mounted_partitions(storage, constraints, report_error, report_warning + continue + + for part in disk.format.partitions: +- part_dev = storage.devicetree.get_device_by_path(part.path) +- if part_dev and part_dev.protected: +- log.debug("Not checking protected %s for being mounted, assuming live " +- "image mount", part.path) +- continue +- if part.busy: +- report_error(_("%s is currently mounted and cannot be used for the " +- "installation. Please unmount it and retry.") % part.path) ++ if part.path not in partitions_to_check: ++ partitions_to_check[part.path] = part ++ ++ if hasattr(disk.original_format, "partitions"): ++ for part in disk.original_format.partitions: ++ if part.path not in partitions_to_check: ++ partitions_to_check[part.path] = part ++ ++ for path, part in partitions_to_check.items(): ++ part_dev = storage.devicetree.get_device_by_path(path) ++ if part_dev and part_dev.protected: ++ log.debug("Not checking protected %s for being mounted, assuming live " ++ "image mount", path) ++ return ++ ++ if part.busy: ++ report_error(_("%s is currently mounted and cannot be used for the " ++ "installation. 
Please unmount it and retry.") % path) + + + def verify_lvm_destruction(storage, constraints, report_error, report_warning): +-- +2.23.0 + diff --git a/bugfix-Create-ssh-user-using-only-existing-fields-1860058.patch b/bugfix-Create-ssh-user-using-only-existing-fields-1860058.patch new file mode 100644 index 0000000..2fecea9 --- /dev/null +++ b/bugfix-Create-ssh-user-using-only-existing-fields-1860058.patch @@ -0,0 +1,34 @@ +From 76d20274ea13439bdfa277aa18a99a1ee9a61d1c Mon Sep 17 00:00:00 2001 +From: Vladimir Slavik +Date: Mon, 3 Aug 2020 15:05:53 +0200 +Subject: [PATCH] Create ssh user using only existing fields (#1860058) + +This fixes the bug where "homedir" and other fields do not exist on +F24_SshPwData and no user is created for SSH access to installation +environment. + +Resolves: rhbz#1860058 + +Cherry-picked from df09030f0e7c8f350ba36ce99fa1bc33e4f45b6a +--- + utils/handle-sshpw | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/utils/handle-sshpw b/utils/handle-sshpw +index 0918610be..fc4726d88 100755 +--- a/utils/handle-sshpw ++++ b/utils/handle-sshpw +@@ -47,8 +47,8 @@ for ud in userdata: + users.set_user_password(username=ud.username, password=ud.password, + is_crypted=ud.isCrypted, lock=ud.lock) + else: +- users.create_user(username=ud.username, password=ud.password, is_crypted=ud.isCrypted, lock=ud.lock, +- homedir=ud.homedir, shell=ud.shell, gecos=ud.gecos, root="/") ++ users.create_user(username=ud.username, password=ud.password, is_crypted=ud.isCrypted, ++ lock=ud.lock, root="/") + + if ud.sshkey: + # Setup the account so that only the sshkey can be used +-- +2.23.0 + diff --git a/bugfix-Create-the-initial-storage-model-during-the-initiali.patch b/bugfix-Create-the-initial-storage-model-during-the-initiali.patch new file mode 100644 index 0000000..b6285a6 --- /dev/null +++ b/bugfix-Create-the-initial-storage-model-during-the-initiali.patch @@ -0,0 +1,50 @@ +From 5cb9170cafc3f81193fd872a21933a0fa2bd5f2c Mon Sep 17 00:00:00 
2001 +From: Vendula Poncova +Date: Mon, 6 Jul 2020 14:04:28 +0200 +Subject: [PATCH] Create the initial storage model during the initialization + +After connecting all objects of the Storage service to signals, create +the initial storage model. It will be propagated to all these objects. + +Otherwise, the objects might raise the UnavailableStorageError exception. + +(cherry-picked from a commit fabc9a0) +--- + pyanaconda/modules/storage/storage.py | 4 ++++ + tests/nosetests/pyanaconda_tests/module_storage_test.py | 5 +++++ + 2 files changed, 9 insertions(+) + +diff --git a/pyanaconda/modules/storage/storage.py b/pyanaconda/modules/storage/storage.py +index 9c5aff943..08254b0ce 100644 +--- a/pyanaconda/modules/storage/storage.py ++++ b/pyanaconda/modules/storage/storage.py +@@ -133,6 +133,10 @@ class StorageService(KickstartService): + self.on_protected_devices_changed + ) + ++ # After connecting modules to signals, create the initial ++ # storage model. It will be propagated to all modules. 
++ self._set_storage(create_storage()) ++ + def _add_module(self, storage_module): + """Add a base kickstart module.""" + self._modules.append(storage_module) +diff --git a/tests/nosetests/pyanaconda_tests/module_storage_test.py b/tests/nosetests/pyanaconda_tests/module_storage_test.py +index 708981233..6bb1723d5 100644 +--- a/tests/nosetests/pyanaconda_tests/module_storage_test.py ++++ b/tests/nosetests/pyanaconda_tests/module_storage_test.py +@@ -120,6 +120,11 @@ class StorageInterfaceTestCase(unittest.TestCase): + storage_reset_callback = Mock() + self.storage_module.partitioning_reset.connect(storage_reset_callback) + ++ self.assertIsNotNone(self.storage_module.storage) ++ storage_changed_callback.assert_not_called() ++ storage_reset_callback.assert_not_called() ++ ++ self.storage_module._current_storage = None + self.assertIsNotNone(self.storage_module.storage) + storage_changed_callback.assert_called_once() + storage_reset_callback.assert_not_called() +-- +2.23.0 + diff --git a/bugfix-Differentiate-between-RAID-levels-of-a-device-and-it.patch b/bugfix-Differentiate-between-RAID-levels-of-a-device-and-it.patch new file mode 100644 index 0000000..9a72e7c --- /dev/null +++ b/bugfix-Differentiate-between-RAID-levels-of-a-device-and-it.patch @@ -0,0 +1,107 @@ +From c9857a91ece047c0fc2df3554e625d15b4700818 Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Thu, 13 Aug 2020 13:04:14 +0200 +Subject: [PATCH] Differentiate between RAID levels of a device and its + container + +The current logic returned the same RAID level for the device and its container, +but we expect that only one of them will have the RAID level set. 
+--- + .../partitioning/interactive/add_device.py | 4 +- + .../storage/partitioning/interactive/utils.py | 37 +++++++++++++++---- + 2 files changed, 31 insertions(+), 10 deletions(-) + +diff --git a/pyanaconda/modules/storage/partitioning/interactive/add_device.py b/pyanaconda/modules/storage/partitioning/interactive/add_device.py +index 82bb9917a..852cf8fbd 100644 +--- a/pyanaconda/modules/storage/partitioning/interactive/add_device.py ++++ b/pyanaconda/modules/storage/partitioning/interactive/add_device.py +@@ -24,7 +24,7 @@ from pyanaconda.modules.common.errors.configuration import StorageConfigurationE + from pyanaconda.modules.common.structures.device_factory import DeviceFactoryRequest + from pyanaconda.modules.common.task import Task + from pyanaconda.modules.storage.partitioning.interactive.utils import \ +- get_device_raid_level_name, get_container_size_policy, get_device_factory_arguments ++ get_container_raid_level_name, get_container_size_policy, get_device_factory_arguments + from pyanaconda.core.storage import PARTITION_ONLY_FORMAT_TYPES + + log = get_module_logger(__name__) +@@ -141,7 +141,7 @@ class AddDeviceTask(Task): + # Don't override user-initiated changes to a defined container. + request.disks = [d.name for d in container.disks] + request.container_encrypted = container.encrypted +- request.container_raid_level = get_device_raid_level_name(container) ++ request.container_raid_level = get_container_raid_level_name(container) + request.container_size_policy = get_container_size_policy(container) + + # The existing container has a name. 
+diff --git a/pyanaconda/modules/storage/partitioning/interactive/utils.py b/pyanaconda/modules/storage/partitioning/interactive/utils.py +index 04313eded..d3e56030a 100644 +--- a/pyanaconda/modules/storage/partitioning/interactive/utils.py ++++ b/pyanaconda/modules/storage/partitioning/interactive/utils.py +@@ -696,12 +696,6 @@ def get_device_raid_level(device): + if hasattr(device, "data_level"): + return device.data_level + +- if hasattr(device, "volume"): +- return device.volume.data_level +- +- if not hasattr(device, "vg") and hasattr(device, "lvs") and len(device.parents) == 1: +- return get_device_raid_level(device.parents[0]) +- + return None + + +@@ -711,6 +705,33 @@ def get_device_raid_level_name(device): + return raid_level.name if raid_level else "" + + ++def get_container_raid_level(container): ++ """Get the RAID level of the given container. ++ ++ :param container: a container ++ :return: a RAID level ++ """ ++ # Try to get a RAID level of this device. ++ raid_level = get_device_raid_level(container) ++ ++ if raid_level: ++ return raid_level ++ ++ device = container.raw_device ++ ++ # Or get a RAID level of the LVM container. ++ if hasattr(device, "lvs") and len(device.parents) == 1: ++ return get_container_raid_level(device.parents[0]) ++ ++ return None ++ ++ ++def get_container_raid_level_name(device): ++ """Get the RAID level name of the given container.""" ++ raid_level = get_container_raid_level(device) ++ return raid_level.name if raid_level else "" ++ ++ + def collect_file_system_types(device): + """Collect supported file system types for the given device. 
+ +@@ -855,7 +876,7 @@ def set_container_data(request: DeviceFactoryRequest, container): + request.container_spec = container.name + request.container_name = container.name + request.container_encrypted = container.encrypted +- request.container_raid_level = get_device_raid_level_name(container) ++ request.container_raid_level = get_container_raid_level_name(container) + request.container_size_policy = get_container_size_policy(container) + + if request.container_encrypted: +@@ -1100,7 +1121,7 @@ def _destroy_device(storage, device): + disks=container.disks, + container_name=container.name, + container_encrypted=container.encrypted, +- container_raid_level=get_device_raid_level(container), ++ container_raid_level=get_container_raid_level(container), + container_size=container.size_policy, + ) + +-- +2.23.0 + diff --git a/bugfix-Do-not-mount-as-RW-in-Dracut.patch b/bugfix-Do-not-mount-as-RW-in-Dracut.patch new file mode 100644 index 0000000..1bd705e --- /dev/null +++ b/bugfix-Do-not-mount-as-RW-in-Dracut.patch @@ -0,0 +1,32 @@ +From 681285fff08169abd175beafae9e4144735e8cd9 Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Fri, 21 Aug 2020 12:59:03 +0200 +Subject: [PATCH] Do not mount as RW in Dracut + +Dracut do not have reason to mount sources as RW it's more that it wasn't +specified and until recently we just did not bother. However, we changed the +logic that installation sources in stage2 environment does not re-using mounts +from the Dracut so this will now fail to set the environment because you can't +mount one thing as RW and RO at once. + +Resolves: rhbz#1871049 +(cherry picked from commit 408ae390c9ed7d14ad6c51979a18d839720e8be6) +--- + dracut/anaconda-diskroot | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/dracut/anaconda-diskroot b/dracut/anaconda-diskroot +index 7e52e052b..8846b1108 100755 +--- a/dracut/anaconda-diskroot ++++ b/dracut/anaconda-diskroot +@@ -64,6 +64,6 @@ if [ -e /tmp/dd_interactive -a ! 
-e /tmp/dd.done ]; then + fi + + info "anaconda using disk root at $dev" +-mount $dev $repodir || warn "Couldn't mount $dev" ++mount -o ro $dev $repodir || warn "Couldn't mount $dev" + anaconda_live_root_dir $repodir $path + run_checkisomd5 $dev +-- +2.23.0 + diff --git a/bugfix-Don-t-enter-spokes-after-we-leave-the-Summary-hub.patch b/bugfix-Don-t-enter-spokes-after-we-leave-the-Summary-hub.patch new file mode 100644 index 0000000..ae3a42d --- /dev/null +++ b/bugfix-Don-t-enter-spokes-after-we-leave-the-Summary-hub.patch @@ -0,0 +1,98 @@ +From e0351e363baedcf788d7ff39fea282885229b4dc Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Fri, 27 Nov 2020 16:46:59 +0100 +Subject: [PATCH] Don't enter spokes after we leave the Summary hub + +If we decide to automatically leave the Summary hub and start the installation, +don't allow to enter spokes from the Summary hub anymore. There might be some +unprocessed callbacks in the even queue. + +Resolved: rhzb#1866022 +--- + pyanaconda/ui/gui/hubs/__init__.py | 26 +++++++++++++++----------- + 1 file changed, 15 insertions(+), 11 deletions(-) + +diff --git a/pyanaconda/ui/gui/hubs/__init__.py b/pyanaconda/ui/gui/hubs/__init__.py +index ee30fd004..81979470b 100644 +--- a/pyanaconda/ui/gui/hubs/__init__.py ++++ b/pyanaconda/ui/gui/hubs/__init__.py +@@ -85,10 +85,11 @@ class Hub(GUIObject, common.Hub): + GUIObject.__init__(self, data) + common.Hub.__init__(self, storage, payload) + +- # enable the autoContinue feature if we are in kickstart ++ # enable the auto continue feature if we are in kickstart + # mode, but if the user interacts with the hub, it will be + # disabled again +- self._autoContinue = flags.automatedInstall ++ self._auto_continue = flags.automatedInstall ++ self._click_continue = False + + self._hubs_collection.append(self) + self.timeout = None +@@ -262,7 +263,7 @@ class Hub(GUIObject, common.Hub): + # If this is a kickstart, consider the user to be warned and + # let them continue anyway, manually + if 
flags.automatedInstall: +- self._autoContinue = False ++ self._auto_continue = False + self._checker_ignore = True + else: + warning = _("Please complete items marked with this icon before continuing to the next step.") +@@ -301,7 +302,6 @@ class Hub(GUIObject, common.Hub): + log.debug("no spokes available on %s, continuing automatically", self) + gtk_call_once(self.window.emit, "continue-clicked") + +- click_continue = False + # Grab all messages that may have appeared since last time this method ran. + while True: + try: +@@ -357,9 +357,9 @@ class Hub(GUIObject, common.Hub): + + if self.continuePossible: + if self._inSpoke: +- self._autoContinue = False +- elif self._autoContinue: +- click_continue = True ++ self._auto_continue = False ++ elif self._auto_continue: ++ self._click_continue = True + + elif code == hubQ.HUB_CODE_MESSAGE: + spoke.selector.set_property("status", args[1]) +@@ -368,9 +368,9 @@ class Hub(GUIObject, common.Hub): + q.task_done() + + # queue is now empty, should continue be clicked? +- if self._autoContinue and click_continue and self.window.get_may_continue(): ++ if self._auto_continue and self._click_continue and self.window.get_may_continue(): + # enqueue the emit to the Gtk message queue +- log.debug("_autoContinue clicking continue button") ++ log.debug("automatically clicking continue button") + gtk_call_once(self.window.emit, "continue-clicked") + + return True +@@ -410,14 +410,18 @@ class Hub(GUIObject, common.Hub): + if selector: + selector.grab_focus() + ++ # The automated kickstart installation already continues. Nothing to do. ++ if self._click_continue: ++ return ++ + # On automated kickstart installs, our desired behavior is to display + # the hub while background processes work, then skip to the progress + # hub immediately after everything's done. 
+ # However if the user proves his intent to change the kickstarted + # values by entering any of the spokes, we need to disable the +- # autoContinue feature and wait for the user to explicitly state ++ # auto continue feature and wait for the user to explicitly state + # that he is done configuring by pressing the continue button. +- self._autoContinue = False ++ self._auto_continue = False + + # Enter the spoke + self._inSpoke = True +-- +2.23.0 + diff --git a/bugfix-Don-t-generate-container-data-for-non-container-devi.patch b/bugfix-Don-t-generate-container-data-for-non-container-devi.patch new file mode 100644 index 0000000..f4d9001 --- /dev/null +++ b/bugfix-Don-t-generate-container-data-for-non-container-devi.patch @@ -0,0 +1,37 @@ +From 0c3cb13730730da2edcc6567bec8256eee9b1770 Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Thu, 13 Aug 2020 12:39:40 +0200 +Subject: [PATCH] Don't generate container data for non-container device types + +If the current device type is not a container device type, don't generate +container data for the device factory request. +--- + pyanaconda/modules/storage/partitioning/interactive/utils.py | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/pyanaconda/modules/storage/partitioning/interactive/utils.py b/pyanaconda/modules/storage/partitioning/interactive/utils.py +index 0057abd6e..04313eded 100644 +--- a/pyanaconda/modules/storage/partitioning/interactive/utils.py ++++ b/pyanaconda/modules/storage/partitioning/interactive/utils.py +@@ -808,6 +808,7 @@ def generate_device_factory_request(storage, device) -> DeviceFactoryRequest: + if device_type is None: + raise UnsupportedDeviceError("Unsupported type of {}.".format(device.name)) + ++ # Generate the device data. 
+ request = DeviceFactoryRequest() + request.device_spec = device.name + request.device_name = getattr(device.raw_device, "lvname", device.raw_device.name) +@@ -828,6 +829,10 @@ def generate_device_factory_request(storage, device) -> DeviceFactoryRequest: + + request.disks = [d.name for d in disks] + ++ if request.device_type not in CONTAINER_DEVICE_TYPES: ++ return request ++ ++ # Generate the container data. + factory = devicefactory.get_device_factory( + storage, + device_type=device_type, +-- +2.23.0 + diff --git a/bugfix-Fix-SECTION-headers-in-docstrings.patch b/bugfix-Fix-SECTION-headers-in-docstrings.patch new file mode 100644 index 0000000..768c914 --- /dev/null +++ b/bugfix-Fix-SECTION-headers-in-docstrings.patch @@ -0,0 +1,149 @@ +From 5d81a7faa67bc065a6e309561865d1682abbcee4 Mon Sep 17 00:00:00 2001 +From: Martin Pitt +Date: Wed, 7 Oct 2020 07:26:18 +0200 +Subject: [PATCH] Fix SECTION headers in docstrings +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Make them coincide with the class names. The missing "Anaconda" prefix +causes a build failure with gobject-introspection ≥ 1.61.1 due to [1]: + + Namespace conflict: DocSection('BaseWindow') + +See https://bugzilla.redhat.com/show_bug.cgi?id=1885825 for some +details. 
+ +[1] https://gitlab.gnome.org/GNOME/gobject-introspection/-/commit/346c0690fe62f614ecb1f55857ea72939d9c0f83 +--- + widgets/src/BaseStandalone.c | 2 +- + widgets/src/BaseWindow.c | 2 +- + widgets/src/DiskOverview.c | 2 +- + widgets/src/HubWindow.c | 2 +- + widgets/src/LayoutIndicator.c | 2 +- + widgets/src/MountpointSelector.c | 2 +- + widgets/src/SpokeSelector.c | 2 +- + widgets/src/SpokeWindow.c | 2 +- + widgets/src/StandaloneWindow.c | 2 +- + 9 files changed, 9 insertions(+), 9 deletions(-) + +diff --git a/widgets/src/BaseStandalone.c b/widgets/src/BaseStandalone.c +index 361f94d..ae84bfc 100644 +--- a/widgets/src/BaseStandalone.c ++++ b/widgets/src/BaseStandalone.c +@@ -22,7 +22,7 @@ + #include "intl.h" + + /** +- * SECTION: BaseStandalone ++ * SECTION: AnacondaBaseStandalone + * @title: AnacondaBaseStandalone + * @short_description: Abstract base class for standalone Anaconda windows. + * +diff --git a/widgets/src/BaseWindow.c b/widgets/src/BaseWindow.c +index 203d4a7..35a8fe0 100644 +--- a/widgets/src/BaseWindow.c ++++ b/widgets/src/BaseWindow.c +@@ -30,7 +30,7 @@ + #include + + /** +- * SECTION: BaseWindow ++ * SECTION: AnacondaBaseWindow + * @title: AnacondaBaseWindow + * @short_description: Top-level, non-resizeable window + * +diff --git a/widgets/src/DiskOverview.c b/widgets/src/DiskOverview.c +index c9e6e0b..2d5aec4 100644 +--- a/widgets/src/DiskOverview.c ++++ b/widgets/src/DiskOverview.c +@@ -27,7 +27,7 @@ + #include "widgets-common.h" + + /** +- * SECTION: DiskOverview ++ * SECTION: AnacondaDiskOverview + * @title: AnacondaDiskOverview + * @short_description: A widget that displays basic information about a disk + * +diff --git a/widgets/src/HubWindow.c b/widgets/src/HubWindow.c +index 77d89e8..02ecde4 100644 +--- a/widgets/src/HubWindow.c ++++ b/widgets/src/HubWindow.c +@@ -22,7 +22,7 @@ + #include "intl.h" + + /** +- * SECTION: HubWindow ++ * SECTION: AnacondaHubWindow + * @title: AnacondaHubWindow + * @short_description: Window for displaying a 
Hub + * +diff --git a/widgets/src/LayoutIndicator.c b/widgets/src/LayoutIndicator.c +index 6e83edd..9fcd983 100644 +--- a/widgets/src/LayoutIndicator.c ++++ b/widgets/src/LayoutIndicator.c +@@ -36,7 +36,7 @@ + #define DEFAULT_LABEL_MAX_CHAR_WIDTH 8 + + /** +- * SECTION: LayoutIndicator ++ * SECTION: AnacondaLayoutIndicator + * @title: AnacondaLayoutIndicator + * @short_description: An indicator of currently activated X layout + * +diff --git a/widgets/src/MountpointSelector.c b/widgets/src/MountpointSelector.c +index e87ba6b..e4b1ad3 100644 +--- a/widgets/src/MountpointSelector.c ++++ b/widgets/src/MountpointSelector.c +@@ -29,7 +29,7 @@ + #include "widgets-common.h" + + /** +- * SECTION: MountpointSelector ++ * SECTION: AnacondaMountpointSelector + * @title: AnacondaMountpointSelector + * @short_description: A graphical way to select a mount point. + * +diff --git a/widgets/src/SpokeSelector.c b/widgets/src/SpokeSelector.c +index 56db102..a6c680a 100644 +--- a/widgets/src/SpokeSelector.c ++++ b/widgets/src/SpokeSelector.c +@@ -28,7 +28,7 @@ + #include "widgets-common.h" + + /** +- * SECTION: SpokeSelector ++ * SECTION: AnacondaSpokeSelector + * @title: AnacondaSpokeSelector + * @short_description: A graphical way to enter a configuration spoke + * +diff --git a/widgets/src/SpokeWindow.c b/widgets/src/SpokeWindow.c +index 226eb2c..7a958c6 100644 +--- a/widgets/src/SpokeWindow.c ++++ b/widgets/src/SpokeWindow.c +@@ -25,7 +25,7 @@ + #include + + /** +- * SECTION: SpokeWindow ++ * SECTION: AnacondaSpokeWindow + * @title: AnacondaSpokeWindow + * @short_description: Window for displaying single spokes + * +diff --git a/widgets/src/StandaloneWindow.c b/widgets/src/StandaloneWindow.c +index 8a92e7b..cc31547 100644 +--- a/widgets/src/StandaloneWindow.c ++++ b/widgets/src/StandaloneWindow.c +@@ -25,7 +25,7 @@ + #include + + /** +- * SECTION: StandaloneWindow ++ * SECTION: AnacondaStandaloneWindow + * @title: AnacondaStandaloneWindow + * @short_description: Window for 
displaying standalone spokes + * +-- +2.27.0 + diff --git a/bugfix-Fix-checking-ssl-certificate-for-metadata-1745064.patch b/bugfix-Fix-checking-ssl-certificate-for-metadata-1745064.patch new file mode 100644 index 0000000..9c709c5 --- /dev/null +++ b/bugfix-Fix-checking-ssl-certificate-for-metadata-1745064.patch @@ -0,0 +1,30 @@ +From e0168180824ab04d8ee6d798efb039bf3d5555dc Mon Sep 17 00:00:00 2001 +From: Jan Stodola +Date: Sat, 31 Oct 2020 22:16:35 +0100 +Subject: [PATCH] Fix checking ssl certificate for metadata (#1745064) + +If the url kickstart command is used with the --noverifyssl option, the +ssl certificate check needs to be disabled for the repository metadata. + +Resolves: rhbz#1745064 +--- + pyanaconda/payload/dnf/payload.py | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/pyanaconda/payload/dnf/payload.py b/pyanaconda/payload/dnf/payload.py +index 623518c66..f146b0dce 100644 +--- a/pyanaconda/payload/dnf/payload.py ++++ b/pyanaconda/payload/dnf/payload.py +@@ -1677,7 +1677,8 @@ class DNFPayload(Payload): + # - the path to a cert file + # - True, to use the system's certificates + # - False, to not verify +- ssl_verify = data.ssl_configuration.ca_cert_path or conf.payload.verify_ssl ++ ssl_verify = (data.ssl_configuration.ca_cert_path ++ or (conf.payload.verify_ssl and data.ssl_verification_enabled)) + ssl_client_cert = data.ssl_configuration.client_cert_path or None + ssl_client_key = data.ssl_configuration.client_key_path or None + ssl_cert = (ssl_client_cert, ssl_client_key) if ssl_client_cert else None +-- +2.23.0 + diff --git a/bugfix-Fix-crash-on-first-entering-of-source-spoke.patch b/bugfix-Fix-crash-on-first-entering-of-source-spoke.patch new file mode 100644 index 0000000..07f45a2 --- /dev/null +++ b/bugfix-Fix-crash-on-first-entering-of-source-spoke.patch @@ -0,0 +1,40 @@ +From c3dbffacabc60f0149b142a1f6b3f29739e9288b Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Mon, 27 Jul 2020 18:13:30 +0200 +Subject: [PATCH] 
Fix crash on first entering of source spoke + +This is called by removing treeinfo repositories which happens thanks to the +initialization of the spoke and selecting first line. Just let everything go +because the repository is added later again (or at least it seems to be working +like that). + +Related: rhbz#1851207 +(cherry picked from commit 5136a4f961c98fec373033027502fba8b409c04d) +--- + pyanaconda/ui/gui/spokes/installation_source.py | 10 ++++++---- + 1 file changed, 6 insertions(+), 4 deletions(-) + +diff --git a/pyanaconda/ui/gui/spokes/installation_source.py b/pyanaconda/ui/gui/spokes/installation_source.py +index 0bd3b6938..7ed95c51d 100644 +--- a/pyanaconda/ui/gui/spokes/installation_source.py ++++ b/pyanaconda/ui/gui/spokes/installation_source.py +@@ -1639,10 +1639,12 @@ class SourceSpoke(NormalSpoke, GUISpokeInputCheckHandler, SourceSwitchHandler): + + # Remove the input validation checks for this repo + repo = self._repo_store[itr][REPO_OBJ] +- self.remove_check(self._repo_checks[repo.repo_id].name_check) +- self.remove_check(self._repo_checks[repo.repo_id].url_check) +- self.remove_check(self._repo_checks[repo.repo_id].proxy_check) +- del self._repo_checks[repo.repo_id] ++ # avoid crash when the source is changed because of initialization ++ if repo.repo_id in self._repo_checks: ++ self.remove_check(self._repo_checks[repo.repo_id].name_check) ++ self.remove_check(self._repo_checks[repo.repo_id].url_check) ++ self.remove_check(self._repo_checks[repo.repo_id].proxy_check) ++ del self._repo_checks[repo.repo_id] + + self._repo_store.remove(itr) + if len(self._repo_store) == 0: +-- +2.23.0 + diff --git a/bugfix-Fix-creating-cached-LVs-on-encrypted-PVs.patch b/bugfix-Fix-creating-cached-LVs-on-encrypted-PVs.patch new file mode 100644 index 0000000..e7ea085 --- /dev/null +++ b/bugfix-Fix-creating-cached-LVs-on-encrypted-PVs.patch @@ -0,0 +1,56 @@ +From 373da9db5d1c7d138f87abfb69165bd9de413a41 Mon Sep 17 00:00:00 2001 +From: Vojtech Trefny +Date: Mon, 13 
Jul 2020 14:53:42 +0200 +Subject: [PATCH] Fix creating cached LVs on encrypted PVs + +We need to get the child device for encrypted PVs because the PV +devices from kickstart are the underlying partitions, not the +lvmpv formatted LUKS devices. + +Resolves: rhbz#1855973 +--- + .../partitioning/custom/custom_partitioning.py | 14 ++++++++++++-- + 1 file changed, 12 insertions(+), 2 deletions(-) + +diff --git a/pyanaconda/modules/storage/partitioning/custom/custom_partitioning.py b/pyanaconda/modules/storage/partitioning/custom/custom_partitioning.py +index 754a48e2e..17c125dd6 100644 +--- a/pyanaconda/modules/storage/partitioning/custom/custom_partitioning.py ++++ b/pyanaconda/modules/storage/partitioning/custom/custom_partitioning.py +@@ -851,6 +851,16 @@ class CustomPartitioningTask(NonInteractivePartitioningTask): + if data.logvol.lvList: + grow_lvm(storage) + ++ def _get_cache_pv_devices(self, devicetree, logvol_data): ++ pv_devices = [] ++ for pvname in logvol_data.cache_pvs: ++ pv = lookup_alias(devicetree, pvname) ++ if pv.format.type == "luks": ++ pv_devices.append(pv.children[0]) ++ else: ++ pv_devices.append(pv) ++ return pv_devices ++ + def _execute_logvol_data(self, storage, data, logvol_data): + """Execute the logvol data. 
+ +@@ -927,7 +937,7 @@ class CustomPartitioningTask(NonInteractivePartitioningTask): + + # If cache PVs specified, check that they belong to the same VG this LV is a member of + if logvol_data.cache_pvs: +- pv_devices = (lookup_alias(devicetree, pv) for pv in logvol_data.cache_pvs) ++ pv_devices = self._get_cache_pv_devices(devicetree, logvol_data) + if not all(pv in vg.pvs for pv in pv_devices): + raise KickstartParseError( + _("Cache PVs must belong to the same VG as the cached LV"), +@@ -1096,7 +1106,7 @@ class CustomPartitioningTask(NonInteractivePartitioningTask): + maxsize = None + + if logvol_data.cache_size and logvol_data.cache_pvs: +- pv_devices = [lookup_alias(devicetree, pv) for pv in logvol_data.cache_pvs] ++ pv_devices = self._get_cache_pv_devices(devicetree, logvol_data) + cache_size = Size("%d MiB" % logvol_data.cache_size) + cache_mode = logvol_data.cache_mode or None + cache_request = LVMCacheRequest(cache_size, pv_devices, cache_mode) +-- +2.23.0 + diff --git a/bugfix-Fix-error-in-initrd-shift-count-out-of-range.patch b/bugfix-Fix-error-in-initrd-shift-count-out-of-range.patch new file mode 100644 index 0000000..bde8d47 --- /dev/null +++ b/bugfix-Fix-error-in-initrd-shift-count-out-of-range.patch @@ -0,0 +1,35 @@ +From bc8779761fe60313a1c754a6b24c2861e86a5e62 Mon Sep 17 00:00:00 2001 +From: Jan Stodola +Date: Sat, 14 Nov 2020 21:27:45 +0100 +Subject: [PATCH] Fix error in initrd: shift count out of range + +anaconda_live_root_dir() can be called with just one argument (for example +when booting boot.iso) and the following error is reported: + +20:26:12,641 INFO dracut-initqueue:anaconda using disk root at /dev/sr0 +20:26:12,650 DEBUG kernel:ISO 9660 Extensions: Microsoft Joliet Level 3 +20:26:12,654 DEBUG kernel:ISO 9660 Extensions: RRIP_1991A +20:26:12,654 INFO dracut-initqueue:/lib/anaconda-lib.sh: line 71: shift: 2: shift count out of range +20:26:12,656 INFO dracut-initqueue:anaconda: found /run/install/repo//images/install.img + +Remove the 
shift call, since it has no use. +--- + dracut/anaconda-lib.sh | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/dracut/anaconda-lib.sh b/dracut/anaconda-lib.sh +index 3b86f3df9..0dc8c817e 100755 +--- a/dracut/anaconda-lib.sh ++++ b/dracut/anaconda-lib.sh +@@ -68,7 +68,7 @@ rulesfile="/etc/udev/rules.d/90-anaconda.rules" + # try to find a usable runtime image from the repo mounted at $mnt. + # if successful, move the mount(s) to $repodir/$isodir. + anaconda_live_root_dir() { +- local img="" iso="" srcdir="" mnt="$1" path="$2"; shift 2 ++ local img="" iso="" srcdir="" mnt="$1" path="$2" + img=$(find_runtime $mnt/$path) + if [ -n "$img" ]; then + info "anaconda: found $img" +-- +2.23.0 + diff --git a/bugfix-Fix-issue-that-treeinfo-repositories-were-never-disa.patch b/bugfix-Fix-issue-that-treeinfo-repositories-were-never-disa.patch new file mode 100644 index 0000000..ed3a178 --- /dev/null +++ b/bugfix-Fix-issue-that-treeinfo-repositories-were-never-disa.patch @@ -0,0 +1,34 @@ +From a57be7d30897ecf301de673e41d1af975b4f593b Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Wed, 29 Jul 2020 14:12:51 +0200 +Subject: [PATCH] Fix issue that treeinfo repositories were never disabled + +The add_repo() method always enable repository at the end. We should improve +this correctly in the upstream to avoid the confusion. 
+ +Related: rhbz#1851207 +(cherry picked from commit d26f4e4cd054288360993220bc9cee4b7abf5ddc) +--- + pyanaconda/payload/dnf/payload.py | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/pyanaconda/payload/dnf/payload.py b/pyanaconda/payload/dnf/payload.py +index 02a66cd25..56c52f54e 100644 +--- a/pyanaconda/payload/dnf/payload.py ++++ b/pyanaconda/payload/dnf/payload.py +@@ -1852,7 +1852,11 @@ class DNFPayload(Payload): + repo.treeinfo_origin = True + log.debug("Adding new treeinfo repository: %s enabled: %s", + repo_md.name, repo_enabled) +- self.add_repo(repo) ++ ++ if repo_enabled: ++ self.add_repo(repo) ++ else: ++ self.add_disabled_repo(repo) + + def _cleanup_old_treeinfo_repositories(self): + """Remove all old treeinfo repositories before loading new ones. +-- +2.23.0 + diff --git a/bugfix-Fix-issue-when-NFS-path-is-pointing-directly-to-ISO-.patch b/bugfix-Fix-issue-when-NFS-path-is-pointing-directly-to-ISO-.patch new file mode 100644 index 0000000..b5d2951 --- /dev/null +++ b/bugfix-Fix-issue-when-NFS-path-is-pointing-directly-to-ISO-.patch @@ -0,0 +1,79 @@ +From 6317147760315ebc269470b3fdb3f66eaeefe9b2 Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Fri, 19 Jun 2020 13:24:57 +0200 +Subject: [PATCH] Fix issue when NFS path is pointing directly to ISO + (#1848718) + +This is totally my fault... The problem is that I thought that this will happen +when mounting and finding the ISO but not for the NFS mount. NFS mount can't be +done for an ISO image but only for the directory. 
+ +Resolves: rhbz#1848718 +Resolves: rhbz#1849083 + +Reported-by: Adam Williamson +--- + .../payloads/source/nfs/initialization.py | 28 ++++++++++++++++--- + 1 file changed, 24 insertions(+), 4 deletions(-) + +diff --git a/pyanaconda/modules/payloads/source/nfs/initialization.py b/pyanaconda/modules/payloads/source/nfs/initialization.py +index 99601bf32..f21c641ea 100644 +--- a/pyanaconda/modules/payloads/source/nfs/initialization.py ++++ b/pyanaconda/modules/payloads/source/nfs/initialization.py +@@ -19,6 +19,7 @@ import os.path + + from pyanaconda.anaconda_loggers import get_module_logger + from pyanaconda.core.payload import parse_nfs_url ++from pyanaconda.core.util import join_paths + from pyanaconda.modules.common.errors.payload import SourceSetupError + from pyanaconda.modules.common.task import Task + from pyanaconda.modules.payloads.source.utils import find_and_mount_iso_image, \ +@@ -54,12 +55,16 @@ class SetUpNFSSourceTask(Task): + mount_point + )) + ++ options, host, path = parse_nfs_url(self._url) ++ path, image = self._split_iso_from_path(path) + try: +- self._mount_nfs() ++ self._mount_nfs(host, options, path) + except PayloadSetupError: + raise SourceSetupError("Could not mount NFS url '{}'".format(self._url)) + +- iso_name = find_and_mount_iso_image(self._device_mount, self._iso_mount) ++ iso_source_path = join_paths(self._device_mount, image) if image else self._device_mount ++ ++ iso_name = find_and_mount_iso_image(iso_source_path, self._iso_mount) + + if iso_name: + log.debug("Using the ISO '%s' mounted at '%s'.", iso_name, self._iso_mount) +@@ -74,9 +79,24 @@ class SetUpNFSSourceTask(Task): + raise SourceSetupError( + "Nothing useful found for NFS source at {}".format(self._url)) + +- def _mount_nfs(self): +- options, host, path = parse_nfs_url(self._url) ++ @staticmethod ++ def _split_iso_from_path(path): ++ """Split ISO from NFS path. ++ ++ NFS path could also contain pointer to ISO which should be mounted. 
Problem of this ++ is that NFS path with ISO cannot be mounted as NFS mount. We have to split these ++ before mount. ++ ++ :param path: path on the NFS server which could point to ISO ++ :return: tuple of path, iso_file_name; is_file_name is empty if no ISO is part of the path ++ :rtype: tuple (str, str) ++ """ ++ if path.endswith(".iso"): ++ return path.rsplit("/", maxsplit=1) ++ ++ return path, "" + ++ def _mount_nfs(self, host, options, path): + if not options: + options = "nolock" + elif "nolock" not in options: +-- +2.23.0 + diff --git a/bugfix-Fix-issue-when-ns_info-cannot-be-retrieved-for-NVDim.patch b/bugfix-Fix-issue-when-ns_info-cannot-be-retrieved-for-NVDim.patch new file mode 100644 index 0000000..0b22559 --- /dev/null +++ b/bugfix-Fix-issue-when-ns_info-cannot-be-retrieved-for-NVDim.patch @@ -0,0 +1,48 @@ +From 49f6e0e64accb5a1e6590bb08f7986fe7eaec2de Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Thu, 29 Oct 2020 10:08:09 +0100 +Subject: [PATCH] Fix issue when ns_info cannot be retrieved for NVDimm + namespace + +If we don't skip this part the uncaught exception will raise because we +are +trying to concatenate string and None types. + +This is happening when NVDIMM namespace is set to DEVDAX mode. In this +mode +there is no device to be returned so we will got None from blivet. 
+ +Resolves: rhbz#1891827 +(cherry picked from commit 6afc375b164a802e26802ec4ba54d3446c078091) +--- + pyanaconda/modules/storage/nvdimm/nvdimm.py | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/pyanaconda/modules/storage/nvdimm/nvdimm.py b/pyanaconda/modules/storage/nvdimm/nvdimm.py +index 4476dd1..5b9c9dc 100644 +--- a/pyanaconda/modules/storage/nvdimm/nvdimm.py ++++ b/pyanaconda/modules/storage/nvdimm/nvdimm.py +@@ -101,6 +101,12 @@ class NVDIMMModule(KickstartBaseModule): + devices_to_ignore = set() + + for ns_name, ns_info in nvdimm.namespaces.items(): ++ # this is happening when namespace is set to DEVDAX mode - block device is not present ++ if ns_info.blockdev is None: ++ log.debug("%s will be skipped - NVDIMM namespace block device information " ++ "can't be retrieved", ns_name) ++ continue ++ + info = udev.get_device(device_node="/dev/" + ns_info.blockdev) + + if info and udev.device_get_format(info) == "iso9660": +@@ -116,8 +122,7 @@ class NVDIMMModule(KickstartBaseModule): + else: + continue + +- if ns_info.blockdev: +- devices_to_ignore.add(ns_info.blockdev) ++ devices_to_ignore.add(ns_info.blockdev) + + return devices_to_ignore + + diff --git a/bugfix-Fix-kickstart-file-error-with-user-groups.patch b/bugfix-Fix-kickstart-file-error-with-user-groups.patch new file mode 100644 index 0000000..5f8ca48 --- /dev/null +++ b/bugfix-Fix-kickstart-file-error-with-user-groups.patch @@ -0,0 +1,40 @@ +From dac59c13424e403f73e3cad46e7412482b17f92a Mon Sep 17 00:00:00 2001 +From: Kai Kang +Date: Thu, 6 Aug 2020 17:09:25 +0800 +Subject: [PATCH] Fix kickstart file error with user groups + +When fill the "Group Membership" in "ADVANCED USER CONFIGURATION" +dialog with the provided example text: + +wheel, my-team (1245), project-x (29935) + +It keeps the spaces between group name(project-x) and group id('(29935)'), +and then write them to kickstart file. 
When boot anaconda with the kickstart +file, it fails with: + +| unrecognized arguments: (1245),project-x (29935) + +Filter out the spaces between group name and group id to avoid such +issue. + +Signed-off-by: Kai Kang +--- + pyanaconda/ui/gui/spokes/user.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/pyanaconda/ui/gui/spokes/user.py b/pyanaconda/ui/gui/spokes/user.py +index 82fbdc8f8..05e01f8a6 100644 +--- a/pyanaconda/ui/gui/spokes/user.py ++++ b/pyanaconda/ui/gui/spokes/user.py +@@ -157,7 +157,7 @@ class AdvancedUserDialog(GUIObject, GUIDialogInputCheckHandler): + self.user.gid = USER_GID_NOT_SET + + # ''.split(',') returns [''] instead of [], which is not what we want +- self.user.groups = [g.strip() for g in self._tGroups.get_text().split(",") if g] ++ self.user.groups = [''.join(g.split()) for g in self._tGroups.get_text().split(",") if g] + + # Send ready signal to main event loop + hubQ.send_ready(self.__class__.__name__, False) +-- +2.23.0 + diff --git a/bugfix-Fix-more-SElinux-contexts.patch b/bugfix-Fix-more-SElinux-contexts.patch new file mode 100644 index 0000000..4867a62 --- /dev/null +++ b/bugfix-Fix-more-SElinux-contexts.patch @@ -0,0 +1,32 @@ +From 1e873f5084f84e16c5d26b65f29ba401ee7f7f94 Mon Sep 17 00:00:00 2001 +From: Vladimir Slavik +Date: Wed, 3 Jun 2020 15:54:00 +0200 +Subject: [PATCH] Fix more SElinux contexts + +Apparently we have them wrong in /boot/loader/entries and /etc/dnf/modules.d/ +See the linked bugs for more details. This is a bit workaroundish, but works.
+ +Resolves: rhbz#1775975 +Resolves: rhbz#1834189 +--- + data/post-scripts/80-setfilecons.ks | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/data/post-scripts/80-setfilecons.ks b/data/post-scripts/80-setfilecons.ks +index d6f183ce8..f08e308e3 100644 +--- a/data/post-scripts/80-setfilecons.ks ++++ b/data/post-scripts/80-setfilecons.ks +@@ -13,7 +13,9 @@ restorecon -ir /etc/sysconfig/network-scripts /etc/lvm /etc/X11/xorg.conf.d \ + /var/lib /var/lib/iscsi /var/lock /var/log /var/spool \ + /var/cache/yum \ + /dev \ +- /root ++ /root \ ++ /boot \ ++ /etc/dnf/modules.d + + # Also relabel the OSTree variants of the traditional mounts if present + restorecon -ir /var/roothome /var/home /var/opt /var/srv /var/media /var/mnt +-- +2.23.0 + diff --git a/bugfix-Fix-passing-of-arguments-when-creating-dracut-argume.patch b/bugfix-Fix-passing-of-arguments-when-creating-dracut-argume.patch new file mode 100644 index 0000000..c421c31 --- /dev/null +++ b/bugfix-Fix-passing-of-arguments-when-creating-dracut-argume.patch @@ -0,0 +1,48 @@ +From 1f384563fd5aa4070cd0b75a6bcaee1648884499 Mon Sep 17 00:00:00 2001 +From: Radek Vykydal +Date: Thu, 4 Jun 2020 13:21:53 +0200 +Subject: [PATCH] Fix passing of arguments when creating dracut arguments for + FCoE + +Resolves: rhbz#1843741 + +Port of https://github.com/rhinstaller/anaconda/pull/2644 +--- + pyanaconda/modules/storage/bootloader/base.py | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/pyanaconda/modules/storage/bootloader/base.py b/pyanaconda/modules/storage/bootloader/base.py +index 9ec3def57..4711513ee 100644 +--- a/pyanaconda/modules/storage/bootloader/base.py ++++ b/pyanaconda/modules/storage/bootloader/base.py +@@ -792,19 +792,25 @@ class BootLoader(object): + network_args = [] + ibft = False + nic = "" ++ host_address = dep.host_address or "" + if isinstance(dep, blivet.devices.iScsiDiskDevice): + if dep.iface == "default" or ":" in dep.iface: + node = 
_get_iscsi_node_from_device(dep) + if iscsi_proxy.IsNodeFromIbft(Node.to_structure(node)): + ibft = True + else: +- nic = iface_for_host_ip(dep.host_address) ++ nic = iface_for_host_ip(host_address) + else: + nic = iscsi_proxy.GetInterface(dep.iface) + else: + nic = dep.nic + if nic or ibft: +- network_args = network_proxy.GetDracutArguments(nic, dep.host_address, "", ibft) ++ network_args = network_proxy.GetDracutArguments( ++ nic, ++ host_address, ++ "", ++ ibft ++ ) + + self.boot_args.update(network_args) + self.dracut_args.update(network_args) +-- +2.23.0 + diff --git a/bugfix-Fix-regression-reading-kernel-list-when-collecting-c.patch b/bugfix-Fix-regression-reading-kernel-list-when-collecting-c.patch new file mode 100644 index 0000000..808d243 --- /dev/null +++ b/bugfix-Fix-regression-reading-kernel-list-when-collecting-c.patch @@ -0,0 +1,41 @@ +From 9858b6e456630d5bdad5b6084c87e60749964f26 Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Thu, 11 Jun 2020 16:56:07 +0200 +Subject: [PATCH] Fix regression reading kernel list when collecting + configurations (#1846156) + +We have to have payload prepared to be able to read list of kernels from the +installation source. However, during transitioning to storage module we moved +reading list of kernels to place where the installation tasks are collected +instead of where they are executed. + +Create a function which will read this list and execute everything later during +the installation tasks execution. + +Resolves: rhbz#1846156 +--- + pyanaconda/installation.py | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +diff --git a/pyanaconda/installation.py b/pyanaconda/installation.py +index a6ec79401..d9596bac8 100644 +--- a/pyanaconda/installation.py ++++ b/pyanaconda/installation.py +@@ -159,9 +159,12 @@ def _prepare_configuration(payload, ksdata): + # been created, fixing the kernel root and subvol args and adding the missing initrd entry. 
+ bootloader_proxy = STORAGE.get_proxy(BOOTLOADER) + +- if payload.type in PAYLOAD_LIVE_TYPES: ++ def fix_btrfs_bootloader(): + btrfs_task = bootloader_proxy.FixBTRFSWithTask(payload.kernel_version_list) +- generate_initramfs.append_dbus_tasks(STORAGE, [btrfs_task]) ++ sync_run_task(STORAGE.get_proxy(btrfs_task)) ++ ++ if payload.type in PAYLOAD_LIVE_TYPES: ++ generate_initramfs.append(Task("Fix bootloader on BTRFS", fix_btrfs_bootloader)) + + # Invoking zipl should be the last thing done on a s390x installation (see #1652727). + zipl_task = bootloader_proxy.FixZIPLWithTask() +-- +2.23.0 + diff --git a/bugfix-Fix-the-combo-box-for-an-URL-type-of-additional-repo.patch b/bugfix-Fix-the-combo-box-for-an-URL-type-of-additional-repo.patch new file mode 100644 index 0000000..6981d62 --- /dev/null +++ b/bugfix-Fix-the-combo-box-for-an-URL-type-of-additional-repo.patch @@ -0,0 +1,40 @@ +From 2585d3380ebbd516757a2420486e68e2809961db Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Tue, 15 Sep 2020 17:25:28 +0200 +Subject: [PATCH] Fix the combo box for an URL type of additional repositories + (#1879127) + +In the commit ff9a7e1, we introduced new constants for the URL source types with +values 'BASEURL', 'MIRRORLIST' and 'METALINK'. In the commit cc2c3d3, we started +to use these constants in the Installation Source spoke and removed the old ones +with values 'url', 'mirrorlist' and 'metalink'. We updated the combo box for the +base repository, but forgot to update the combo box for additional repositories. + +The combo box items have to have ids that match the values of the constants, +otherwise the URL type of additional repositories will be always 'BASEURL'. 
+ +Resolves: rhbz#1879127 +--- + pyanaconda/ui/gui/spokes/installation_source.glade | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/pyanaconda/ui/gui/spokes/installation_source.glade b/pyanaconda/ui/gui/spokes/installation_source.glade +index e53fa230c..bc07a4a7b 100644 +--- a/pyanaconda/ui/gui/spokes/installation_source.glade ++++ b/pyanaconda/ui/gui/spokes/installation_source.glade +@@ -1296,9 +1296,9 @@ + False + start + +- repository URL +- mirrorlist +- metalink ++ repository URL ++ mirrorlist ++ metalink + + + +-- +2.23.0 + diff --git a/bugfix-Fix-the-logic-for-enabling-latest-updates.patch b/bugfix-Fix-the-logic-for-enabling-latest-updates.patch new file mode 100644 index 0000000..7d117e5 --- /dev/null +++ b/bugfix-Fix-the-logic-for-enabling-latest-updates.patch @@ -0,0 +1,41 @@ +From 98e011f1f8af900ed6f65432ad7466973d44735a Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Fri, 27 Nov 2020 14:33:31 +0100 +Subject: [PATCH] Fix the logic for enabling latest updates + +Fix bugs introduced in the commit 4a2b8f2. The negation of the logic wasn't +applied correctly. Without the fix, updates repositories are disabled when the +updates are enabled. +--- + pyanaconda/ui/gui/spokes/installation_source.py | 8 +++----- + 1 file changed, 3 insertions(+), 5 deletions(-) + +diff --git a/pyanaconda/ui/gui/spokes/installation_source.py b/pyanaconda/ui/gui/spokes/installation_source.py +index 00a180613..e528d8662 100644 +--- a/pyanaconda/ui/gui/spokes/installation_source.py ++++ b/pyanaconda/ui/gui/spokes/installation_source.py +@@ -1085,7 +1085,7 @@ class SourceSpoke(NormalSpoke, GUISpokeInputCheckHandler, SourceSwitchHandler): + uncheck it. 
+ """ + self._updates_box.set_sensitive(self._mirror_active()) +- active = self._mirror_active() or self.payload.is_repo_enabled("updates") ++ active = self._mirror_active() and self.payload.is_repo_enabled("updates") + self._updates_radio_button.set_active(active) + + @property +@@ -1646,10 +1646,8 @@ class SourceSpoke(NormalSpoke, GUISpokeInputCheckHandler, SourceSwitchHandler): + + def on_updatesRadioButton_toggled(self, button): + """Toggle the enable state of the updates repo.""" +- if self._updates_radio_button.get_active(): +- self.payload.set_updates_enabled(False) +- else: +- self.payload.set_updates_enabled(True) ++ active = self._updates_radio_button.get_active() ++ self.payload.set_updates_enabled(active) + + # Refresh the metadata using the new set of repos + self._updates_change = True +-- +2.23.0 + diff --git a/bugfix-Fix-traceback-when-removing-additional-repository.patch b/bugfix-Fix-traceback-when-removing-additional-repository.patch new file mode 100644 index 0000000..9ccbdd6 --- /dev/null +++ b/bugfix-Fix-traceback-when-removing-additional-repository.patch @@ -0,0 +1,34 @@ +From b7ee1291870a0d6689ff6d81e1c50999f38cd6b7 Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Fri, 21 Aug 2020 15:07:28 +0200 +Subject: [PATCH] Fix traceback when removing additional repository + +I made a mistake by missing `not` in the condition. That way the repo removal as +user action crashed Anaconda because variable which was not set was immediately +used. + +Unfortunately, I tested this just on the failing use-case and in that case it +looked like it is working correctly (but didn't really). 
+ +Resolves: rhbz#1871037 +(cherry picked from commit a92428aa61f31d3f25786d4e90108d0d7751d680) +--- + pyanaconda/ui/gui/spokes/installation_source.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/pyanaconda/ui/gui/spokes/installation_source.py b/pyanaconda/ui/gui/spokes/installation_source.py +index 7ed95c51d..76631754b 100644 +--- a/pyanaconda/ui/gui/spokes/installation_source.py ++++ b/pyanaconda/ui/gui/spokes/installation_source.py +@@ -1629,7 +1629,7 @@ class SourceSpoke(NormalSpoke, GUISpokeInputCheckHandler, SourceSwitchHandler): + :param repo_model_path: repo_model_path of what we can remove or None + :type repo_model_path: repo_store repo_model_path + """ +- if repo_model_path is None: ++ if repo_model_path is not None: + itr = self._repo_store[repo_model_path].iter + else: + itr = self._repo_selection.get_selected()[1] +-- +2.23.0 + diff --git a/bugfix-Fix-traceback-when-starting-installation-with-inst.c.patch b/bugfix-Fix-traceback-when-starting-installation-with-inst.c.patch new file mode 100644 index 0000000..e3001b8 --- /dev/null +++ b/bugfix-Fix-traceback-when-starting-installation-with-inst.c.patch @@ -0,0 +1,36 @@ +From 0f9873a2ffbf3a180a236c4f036e54520773f2ca Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Tue, 22 Sep 2020 16:28:48 +0200 +Subject: [PATCH] Fix traceback when starting installation with inst.console + (no args) + +None is the default value which would fail on os.path.basename call. +--- + pyanaconda/modules/storage/bootloader/base.py | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/pyanaconda/modules/storage/bootloader/base.py b/pyanaconda/modules/storage/bootloader/base.py +index b1122287a..a8eb26cc7 100644 +--- a/pyanaconda/modules/storage/bootloader/base.py ++++ b/pyanaconda/modules/storage/bootloader/base.py +@@ -903,10 +903,16 @@ class BootLoader(object): + + def _set_console(self): + """ Set console options based on boot arguments. 
""" +- console = kernel_arguments.get("console", "") ++ console = kernel_arguments.get("console") ++ ++ if not console: ++ return ++ + console = os.path.basename(console) + self.console, _x, self.console_options = console.partition(",") + ++ log.debug("Console is set to %s with options '%s'", self.console, self.console_options) ++ + def write_config_console(self, config): + """Write console-related configuration lines.""" + pass +-- +2.23.0 + diff --git a/bugfix-GUI-nfs-unknown-error.patch b/bugfix-GUI-nfs-unknown-error.patch new file mode 100644 index 0000000..32d36f2 --- /dev/null +++ b/bugfix-GUI-nfs-unknown-error.patch @@ -0,0 +1,26 @@ +From 7af95c3ee0fe3f0c2a5ec6fb05673f10c19441f9 Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Thu, 18 Jun 2020 22:48:03 +0800 +Subject: [PATCH] bugfix GUI nfs unknown error + +--- + pyanaconda/ui/gui/spokes/installation_source.py | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/pyanaconda/ui/gui/spokes/installation_source.py b/pyanaconda/ui/gui/spokes/installation_source.py +index 396cad6..16e81b4 100644 +--- a/pyanaconda/ui/gui/spokes/installation_source.py ++++ b/pyanaconda/ui/gui/spokes/installation_source.py +@@ -1141,6 +1141,9 @@ class SourceSpoke(NormalSpoke, GUISpokeInputCheckHandler, SourceSwitchHandler): + else: + return _("Remote directory is required") + ++ if ":" not in url_string or len(url_string.split(":")) != 2: ++ return _("Server must be specified as SERVER:/PATH") ++ + return InputCheck.CHECK_OK + + def _check_url_entry(self, inputcheck): +-- +2.23.0 + diff --git a/bugfix-Handle-exceptions-from-threads-without-new-instances.patch b/bugfix-Handle-exceptions-from-threads-without-new-instances.patch new file mode 100644 index 0000000..84f56ba --- /dev/null +++ b/bugfix-Handle-exceptions-from-threads-without-new-instances.patch @@ -0,0 +1,36 @@ +From 8ab916a0fe7b46b20c3a51828600b4f7f207717a Mon Sep 17 00:00:00 2001 +From: Vladimir Slavik +Date: Tue, 18 Aug 2020 15:23:49 +0200 +Subject: [PATCH] Handle 
exceptions from threads without new instances + +It is not possible to instantiate some exceptions with just an instance as +the only argument, for example UnicodeError and descendants. However, these +days it is possible to raise directly with the provided instance, no need to +instantiate the class. The instance also has the traceback already set, so no +need to set it either. + +The original apparently came to be so due to incrementally rewriting python2's +3-argument form of raise. See also previous commits affecting this line, +in chronological order: 07b7034, d16512e, a6085b8. + +Resolves: rhbz#1835027 +--- + pyanaconda/threading.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/pyanaconda/threading.py b/pyanaconda/threading.py +index e0ab80229..d2327cf39 100644 +--- a/pyanaconda/threading.py ++++ b/pyanaconda/threading.py +@@ -168,7 +168,7 @@ class ThreadManager(object): + with self._errors_lock: + exc_info = self._errors.pop(name) + if exc_info: +- raise exc_info[0](exc_info[1]).with_traceback(exc_info[2]) ++ raise exc_info[1] + + def in_main_thread(self): + """Return True if it is run in the main thread.""" +-- +2.23.0 + diff --git a/bugfix-Keep-treeinfo-repositories-disabled-after-payload-re.patch b/bugfix-Keep-treeinfo-repositories-disabled-after-payload-re.patch new file mode 100644 index 0000000..931e6de --- /dev/null +++ b/bugfix-Keep-treeinfo-repositories-disabled-after-payload-re.patch @@ -0,0 +1,101 @@ +From 7982cf9937165ad34fe3c4bdf2cf2155a4f5e7f8 Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Thu, 30 Jul 2020 13:25:28 +0200 +Subject: [PATCH] Keep treeinfo repositories disabled after payload reset + +Without this change user can't disable repositories because they are +reloaded and enabled automatically as new ones. 
+ +Related: rhbz#1851207 +(cherry picked from commit 0750143fca814b660eba719c2df597d3af5998f8) +--- + pyanaconda/payload/dnf/payload.py | 33 +++++++++++++++++++++++++------ + 1 file changed, 27 insertions(+), 6 deletions(-) + +diff --git a/pyanaconda/payload/dnf/payload.py b/pyanaconda/payload/dnf/payload.py +index 227d32c82..02a66cd25 100644 +--- a/pyanaconda/payload/dnf/payload.py ++++ b/pyanaconda/payload/dnf/payload.py +@@ -1455,7 +1455,7 @@ class DNFPayload(Payload): + log.info("Configuring the base repo") + self.reset() + +- self._cleanup_old_treeinfo_repositories() ++ disabled_treeinfo_repo_names = self._cleanup_old_treeinfo_repositories() + + # Find the source and its type. + source_proxy = self.get_source_proxy() +@@ -1511,7 +1511,7 @@ class DNFPayload(Payload): + base_repo_url = self._get_base_repo_location(install_tree_url) + log.debug("releasever from %s is %s", base_repo_url, self._base.conf.releasever) + +- self._load_treeinfo_repositories(base_repo_url) ++ self._load_treeinfo_repositories(base_repo_url, disabled_treeinfo_repo_names) + except configparser.MissingSectionHeaderError as e: + log.error("couldn't set releasever from base repo (%s): %s", source_type, e) + +@@ -1817,11 +1817,13 @@ class DNFPayload(Payload): + log.debug("No base repository found in treeinfo file. Using installation tree root.") + return install_tree_url + +- def _load_treeinfo_repositories(self, base_repo_url): ++ def _load_treeinfo_repositories(self, base_repo_url, repo_names_to_disable): + """Load new repositories from treeinfo file. + + :param base_repo_url: base repository url. This is not saved anywhere when the function + is called. It will be add to the existing urls if not None. 
++ :param repo_names_to_disable: list of repository names which should be disabled after load ++ :type repo_names_to_disable: [str] + """ + if self._install_tree_metadata: + existing_urls = [] +@@ -1838,11 +1840,18 @@ class DNFPayload(Payload): + for repo_md in self._install_tree_metadata.get_metadata_repos(): + if repo_md.path not in existing_urls: + repo_treeinfo = self._install_tree_metadata.get_treeinfo_for(repo_md.name) +- repo_enabled = repo_treeinfo.type in enabled_repositories_from_treeinfo ++ ++ # disable repositories disabled by user manually before ++ if repo_md.name in repo_names_to_disable: ++ repo_enabled = False ++ else: ++ repo_enabled = repo_treeinfo.type in enabled_repositories_from_treeinfo ++ + repo = RepoData(name=repo_md.name, baseurl=repo_md.path, + install=False, enabled=repo_enabled) + repo.treeinfo_origin = True +- log.debug("Adding new treeinfo repository %s", repo_md.name) ++ log.debug("Adding new treeinfo repository: %s enabled: %s", ++ repo_md.name, repo_enabled) + self.add_repo(repo) + + def _cleanup_old_treeinfo_repositories(self): +@@ -1850,12 +1859,24 @@ class DNFPayload(Payload): + + Find all repositories added from treeinfo file and remove them. After this step new + repositories will be loaded from the new link. ++ ++ :return: list of repository names which were disabled before removal ++ :rtype: [str] + """ ++ disabled_repo_names = [] ++ + for ks_repo_name in self.addons: +- if self.get_addon_repo(ks_repo_name).treeinfo_origin: ++ repo = self.get_addon_repo(ks_repo_name) ++ if repo.treeinfo_origin: + log.debug("Removing old treeinfo repository %s", ks_repo_name) ++ ++ if not repo.enabled: ++ disabled_repo_names.append(ks_repo_name) ++ + self.remove_repo(ks_repo_name) + ++ return disabled_repo_names ++ + def _write_dnf_repo(self, repo, repo_path): + """Write a repo object to a DNF repo.conf file. 
+ +-- +2.23.0 + diff --git a/bugfix-Never-mount-partitions-on-a-disk-with-the-iso9660-fi.patch b/bugfix-Never-mount-partitions-on-a-disk-with-the-iso9660-fi.patch new file mode 100644 index 0000000..0191b1b --- /dev/null +++ b/bugfix-Never-mount-partitions-on-a-disk-with-the-iso9660-fi.patch @@ -0,0 +1,101 @@ +From b224f888c225d9735abaa82d0156ae5a4c38d4f5 Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Fri, 18 Sep 2020 15:13:14 +0200 +Subject: [PATCH] Never mount partitions on a disk with the iso9660 filesystem + +Blivet doesn't recognize these partitions, so we mount the disk in stage2. +However, it will fail if one of its partitions is already mounted, for example +in stage1. Therefore, skip these partitions in the script that looks for a root +image on devices and use the disk instead. + +The boot.iso has the following structure: + +NAME TYPE SIZE FSTYPE LABEL +sda disk 8.8G iso9660 RHEL-8-3-0-BaseOS-x86_64 +|-sda1 part 8.8G iso9660 RHEL-8-3-0-BaseOS-x86_64 +`-sda2 part 10M vfat ANACONDA + +And the following default boot options: + +inst.stage2=hd:LABEL=RHEL-8-3-0-BaseOS-x86_64 rd.live.check quiet + +Anaconda runs the anaconda-diskroot script for every device that matches the +specification from the inst.stage2 option. In this example, it is sda and sda1, +because both of them have the specified label RHEL-8-3-0-BaseOS-x86_64. With +the fix, the script will detect that sda1 is a partition on a disk with the +iso9660 filesystem and stop to process sda1. The script will succeed with sda. + +We should never rely on the order of the devices. 
+ +(cherry-picked from a commit 301a09d) + +Related: rhbz#1878784 +--- + dracut/anaconda-diskroot | 17 ++++++++++++++++- + dracut/anaconda-lib.sh | 22 ++++++++++++++++++++++ + 2 files changed, 38 insertions(+), 1 deletion(-) + +diff --git a/dracut/anaconda-diskroot b/dracut/anaconda-diskroot +index 8846b1108..2797e6762 100755 +--- a/dracut/anaconda-diskroot ++++ b/dracut/anaconda-diskroot +@@ -41,7 +41,22 @@ dev="$1" + path="$2" # optional, could be empty + kickstart="$(getarg ks= inst.ks=)" + +-[ -e "/dev/root" ] && exit 1 # we already have a root device! ++# Log the device that triggered this job. ++debug_msg "Trying to find a root image on the device $dev." ++ ++# Do we already have a root device? ++# Then do not run again. ++[ -e "/dev/root" ] && exit 1 ++ ++# Skip partitions on a disk with the iso9660 filesystem. Blivet doesn't ++# recognize these partitions, so we mount the disk in stage2. However, it ++# will fail if one of its partitions is already mounted, for example here. ++# Therefore, skip the partitions and use the disk to find our root image. ++# See the bug 1878784. ++if dev_is_on_disk_with_iso9660 "$dev"; then ++ debug_msg "Skipping $dev on a disk with iso9660." ++ exit 1 ++fi + + # If we're waiting for a cdrom kickstart, the user might need to swap discs. + # So if this is a CDROM drive, make a note of it, but don't mount it (yet). +diff --git a/dracut/anaconda-lib.sh b/dracut/anaconda-lib.sh +index e2ab7a205..3b86f3df9 100755 +--- a/dracut/anaconda-lib.sh ++++ b/dracut/anaconda-lib.sh +@@ -253,6 +253,28 @@ dev_is_cdrom() { + udevadm info --query=property --name=$1 | grep -q 'ID_CDROM=1' + } + ++dev_is_on_disk_with_iso9660() { ++ # Get the name of the device. ++ local dev_name="${1}" ++ ++ # Get the path of the device. ++ local dev_path="$(udevadm info -q path --name ${dev_name})" ++ ++ # Is the device a partition? ++ udevadm info -q property --path ${dev_path} | grep -q 'DEVTYPE=partition' || return 1 ++ ++ # Get the path of the parent. 
++ local disk_path="${dev_path%/*}" ++ ++ # Is the parent a disk? ++ udevadm info -q property --path ${disk_path} | grep -q 'DEVTYPE=disk' || return 1 ++ ++ # Does the parent has the iso9660 filesystem? ++ udevadm info -q property --path ${disk_path} | grep -q 'ID_FS_TYPE=iso9660' || return 1 ++ ++ return 0 ++} ++ + # dracut doesn't bring up the network unless: + # a) $netroot is set (i.e. you have a network root device), or + # b) /tmp/net.ifaces exists. +-- +2.23.0 + diff --git a/bugfix-Only-pass-one-initrd-image-to-kexec.patch b/bugfix-Only-pass-one-initrd-image-to-kexec.patch new file mode 100644 index 0000000..91c2167 --- /dev/null +++ b/bugfix-Only-pass-one-initrd-image-to-kexec.patch @@ -0,0 +1,40 @@ +From 4766a00a9d67bfe93573e1160ac05fe9c8883aa9 Mon Sep 17 00:00:00 2001 +From: Javier Martinez Canillas +Date: Thu, 9 Jul 2020 15:51:23 +0200 +Subject: [PATCH] Only pass one initrd image to kexec + +The kexec command line tool can only get a single initrd, but a boot entry +can have multiple ones. For example the tuned package adds a variable that +could be set to a second initrd image. + +If that's the case, it will lead to the following error: + +FileNotFoundError: [Errno 2] No such file or directory: +'/mnt/sysroot/boot/initramfs-4.18.0-223.el8.x86_64.img $tuned_initrd' + +Resolves: rhbz#1855290 + +Signed-off-by: Javier Martinez Canillas +--- + pyanaconda/kexec.py | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/pyanaconda/kexec.py b/pyanaconda/kexec.py +index 02033bb7e..37d2b4a14 100644 +--- a/pyanaconda/kexec.py ++++ b/pyanaconda/kexec.py +@@ -72,6 +72,11 @@ def run_grubby(args=None): + if boot_info_fields: + raise GrubbyInfoError("Missing values: %s" % ", ".join(boot_info_fields)) + ++ # There could be multiple initrd images defined for a boot entry, but ++ # the kexec command line tool only supports passing a single initrd. 
++ if "initrd" in boot_info_args: ++ boot_info_args["initrd"] = boot_info_args["initrd"].split(" ")[0] ++ + boot_info = boot_info_class(**boot_info_args) + log.info("grubby boot info for (%s): %s", args, boot_info) + return boot_info +-- +2.23.0 + diff --git a/bugfix-Propagate-a-lazy-proxy-of-the-storage-model.patch b/bugfix-Propagate-a-lazy-proxy-of-the-storage-model.patch new file mode 100644 index 0000000..e1642e3 --- /dev/null +++ b/bugfix-Propagate-a-lazy-proxy-of-the-storage-model.patch @@ -0,0 +1,291 @@ +From 39d3a894411e3069cdb14354509153028a48e6c5 Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Wed, 2 Sep 2020 13:40:36 +0200 +Subject: [PATCH] Propagate a lazy proxy of the storage model + +The storage model for the partitioning modules should be always created lazily. +When we reset the partitioning, the new model shouldn't be created until we try +to work with it. Then we create a new copy of the storage model, hide disks that +are not selected and possibly initialize empty disks. + +There is a problem with modules that need to be able to work with the same copy +of the storage model as the partitioning modules. At this moment, it is only the +device tree module. The suggested solution is to propagate a lazy proxy of the +storage model. It will not trigger the creation of the copy until we try to +access the attributes of the storage model. + +Basically, the device tree module always gets the storage model on demand from +the storage property of the partitioning module. 
+ +Related: rhbz#1868577 +--- + pyanaconda/core/util.py | 37 +++++++ + .../modules/storage/partitioning/base.py | 28 ++++-- + .../module_part_interactive_test.py | 18 ++++ + tests/nosetests/pyanaconda_tests/util_test.py | 97 ++++++++++++++++++- + 4 files changed, 170 insertions(+), 10 deletions(-) + +diff --git a/pyanaconda/core/util.py b/pyanaconda/core/util.py +index 4615f9fd8..60b6ff310 100644 +--- a/pyanaconda/core/util.py ++++ b/pyanaconda/core/util.py +@@ -1431,3 +1431,40 @@ def is_smt_enabled(): + except (IOError, ValueError): + log.warning("Failed to detect SMT.") + return False ++ ++ ++class LazyObject(object): ++ """The lazy object.""" ++ ++ def __init__(self, getter): ++ """Create a proxy of an object. ++ ++ The object might not exist until we call the given ++ function. The function is called only when we try ++ to access the attributes of the object. ++ ++ The returned object is not cached in this class. ++ We call the function every time. ++ ++ :param getter: a function that returns the object ++ """ ++ self._getter = getter ++ ++ @property ++ def _object(self): ++ return self._getter() ++ ++ def __eq__(self, other): ++ return self._object == other ++ ++ def __hash__(self): ++ return self._object.__hash__() ++ ++ def __getattr__(self, name): ++ return getattr(self._object, name) ++ ++ def __setattr__(self, name, value): ++ if name in ("_getter", ): ++ return super().__setattr__(name, value) ++ ++ return setattr(self._object, name, value) +diff --git a/pyanaconda/modules/storage/partitioning/base.py b/pyanaconda/modules/storage/partitioning/base.py +index 989fa0a7b..c8b4b95ac 100644 +--- a/pyanaconda/modules/storage/partitioning/base.py ++++ b/pyanaconda/modules/storage/partitioning/base.py +@@ -17,12 +17,13 @@ + # License and may only be used or replicated with the express permission of + # Red Hat, Inc. 
+ # +-from abc import abstractmethod, abstractproperty ++from abc import abstractmethod + + from blivet.devices import PartitionDevice, TmpFSDevice, LVMLogicalVolumeDevice, \ + LVMVolumeGroupDevice, MDRaidArrayDevice, BTRFSDevice + + from dasbus.server.publishable import Publishable ++from pyanaconda.core.util import LazyObject + from pyanaconda.modules.common.base.base import KickstartBaseModule + from pyanaconda.modules.common.errors.storage import UnavailableStorageError + from pyanaconda.anaconda_loggers import get_module_logger +@@ -45,7 +46,8 @@ class PartitioningModule(KickstartBaseModule, Publishable): + self._selected_disks = [] + self._device_tree_module = None + +- @abstractproperty ++ @property ++ @abstractmethod + def partitioning_method(self): + """Type of the partitioning method.""" + return None +@@ -67,8 +69,22 @@ class PartitioningModule(KickstartBaseModule, Publishable): + + return self._storage_playground + ++ @property ++ def lazy_storage(self): ++ """The lazy storage model. ++ ++ Provides a lazy access to the storage model. This property will not ++ trigger a creation of the storage playground. The playground will be ++ created on the first access of the storage attributes. 
++ """ ++ return LazyObject(lambda: self.storage) ++ + def _create_storage_playground(self): + """Prepare the current storage model for partitioning.""" ++ log.debug( ++ "Creating a new storage playground for %s with " ++ "selected disks %s.", self, self._selected_disks ++ ) + storage = self._current_storage.copy() + storage.select_disks(self._selected_disks) + return storage +@@ -77,16 +93,10 @@ class PartitioningModule(KickstartBaseModule, Publishable): + """Update the current storage.""" + self._current_storage = storage + +- if self._device_tree_module: +- self._device_tree_module.on_storage_changed(self.storage) +- + def on_partitioning_reset(self): + """Drop the storage playground.""" + self._storage_playground = None + +- if self._device_tree_module: +- self._device_tree_module.on_storage_changed(self.storage) +- + def on_selected_disks_changed(self, selection): + """Keep the current disk selection.""" + self._selected_disks = selection +@@ -100,7 +110,7 @@ class PartitioningModule(KickstartBaseModule, Publishable): + + if not module: + module = self._create_device_tree() +- module.on_storage_changed(self.storage) ++ module.on_storage_changed(self.lazy_storage) + self._device_tree_module = module + + return module +diff --git a/tests/nosetests/pyanaconda_tests/module_part_interactive_test.py b/tests/nosetests/pyanaconda_tests/module_part_interactive_test.py +index 13d33feab..32fe589b7 100644 +--- a/tests/nosetests/pyanaconda_tests/module_part_interactive_test.py ++++ b/tests/nosetests/pyanaconda_tests/module_part_interactive_test.py +@@ -71,6 +71,24 @@ class InteractivePartitioningInterfaceTestCase(unittest.TestCase): + """Test Method property.""" + self.assertEqual(self.interface.PartitioningMethod, PARTITIONING_METHOD_INTERACTIVE) + ++ @patch_dbus_publish_object ++ def lazy_storage_test(self, publisher): ++ """Make sure that the storage playground is created lazily.""" ++ self.module.on_storage_changed(create_storage()) ++ ++ device_tree_module = 
self.module.get_device_tree() ++ self.assertIsNone(self.module._storage_playground) ++ ++ device_tree_module.get_disks() ++ self.assertIsNotNone(self.module._storage_playground) ++ ++ self.module.on_partitioning_reset() ++ self.module.on_storage_changed(create_storage()) ++ self.assertIsNone(self.module._storage_playground) ++ ++ device_tree_module.get_actions() ++ self.assertIsNotNone(self.module._storage_playground) ++ + @patch_dbus_publish_object + def get_device_tree_test(self, publisher): + """Test GetDeviceTree.""" +diff --git a/tests/nosetests/pyanaconda_tests/util_test.py b/tests/nosetests/pyanaconda_tests/util_test.py +index 1da8362dc..76f1c4465 100644 +--- a/tests/nosetests/pyanaconda_tests/util_test.py ++++ b/tests/nosetests/pyanaconda_tests/util_test.py +@@ -29,7 +29,7 @@ from unittest.mock import Mock, patch + from pyanaconda.errors import ExitError + from pyanaconda.core.process_watchers import WatchProcesses + from pyanaconda.core import util +-from pyanaconda.core.util import synchronized ++from pyanaconda.core.util import synchronized, LazyObject + from pyanaconda.core.configuration.anaconda import conf + + from timer import timer +@@ -829,3 +829,98 @@ class MiscTests(unittest.TestCase): + ) + self.assertEqual(get_anaconda_version_string(), "1.0") + self.assertEqual(get_anaconda_version_string(build_time_version=True), "1.0-1") ++ ++ ++class LazyObjectTestCase(unittest.TestCase): ++ ++ class Object(object): ++ ++ def __init__(self): ++ self._x = 0 ++ ++ @property ++ def x(self): ++ return self._x ++ ++ @x.setter ++ def x(self, value): ++ self._x = value ++ ++ def f(self, value): ++ self._x += value ++ ++ def setUp(self): ++ self._obj = None ++ ++ @property ++ def obj(self): ++ if not self._obj: ++ self._obj = self.Object() ++ ++ return self._obj ++ ++ @property ++ def lazy_obj(self): ++ return LazyObject(lambda: self.obj) ++ ++ def get_set_test(self): ++ self.assertIsNotNone(self.lazy_obj) ++ self.assertIsNone(self._obj) ++ ++ 
self.assertEqual(self.lazy_obj.x, 0) ++ self.assertIsNotNone(self._obj) ++ ++ self.obj.x = -10 ++ self.assertEqual(self.obj.x, -10) ++ self.assertEqual(self.lazy_obj.x, -10) ++ ++ self.lazy_obj.x = 10 ++ self.assertEqual(self.obj.x, 10) ++ self.assertEqual(self.lazy_obj.x, 10) ++ ++ self.lazy_obj.f(90) ++ self.assertEqual(self.obj.x, 100) ++ self.assertEqual(self.lazy_obj.x, 100) ++ ++ def eq_test(self): ++ a = object() ++ lazy_a1 = LazyObject(lambda: a) ++ lazy_a2 = LazyObject(lambda: a) ++ ++ self.assertEqual(a, lazy_a1) ++ self.assertEqual(lazy_a1, a) ++ ++ self.assertEqual(a, lazy_a2) ++ self.assertEqual(lazy_a2, a) ++ ++ self.assertEqual(lazy_a1, lazy_a2) ++ self.assertEqual(lazy_a2, lazy_a1) ++ ++ self.assertEqual(lazy_a1, lazy_a1) ++ self.assertEqual(lazy_a2, lazy_a2) ++ ++ def neq_test(self): ++ a = object() ++ lazy_a = LazyObject(lambda: a) ++ ++ b = object() ++ lazy_b = LazyObject(lambda: b) ++ ++ self.assertNotEqual(b, lazy_a) ++ self.assertNotEqual(lazy_a, b) ++ ++ self.assertNotEqual(lazy_a, lazy_b) ++ self.assertNotEqual(lazy_b, lazy_a) ++ ++ def hash_test(self): ++ a = object() ++ lazy_a1 = LazyObject(lambda: a) ++ lazy_a2 = LazyObject(lambda: a) ++ ++ b = object() ++ lazy_b1 = LazyObject(lambda: b) ++ lazy_b2 = LazyObject(lambda: b) ++ ++ self.assertEqual({a, lazy_a1, lazy_a2}, {a}) ++ self.assertEqual({b, lazy_b1, lazy_b2}, {b}) ++ self.assertEqual({lazy_a1, lazy_b2}, {a, b}) diff --git a/bugfix-Recognize-systemd.unit-anaconda.target-in-anaconda-g.patch b/bugfix-Recognize-systemd.unit-anaconda.target-in-anaconda-g.patch new file mode 100644 index 0000000..2529ae6 --- /dev/null +++ b/bugfix-Recognize-systemd.unit-anaconda.target-in-anaconda-g.patch @@ -0,0 +1,32 @@ +From ac6010448ba29f8c5b979d11cabeb09a91cf260c Mon Sep 17 00:00:00 2001 +From: Mikhail Novosyolov +Date: Mon, 31 Aug 2020 23:44:17 +0300 +Subject: [PATCH] Recognize systemd.unit=anaconda.target in anaconda-generator + +anaconda.target may be activated not only by symlinking 
/etc/systemd/systemd/default.target, +this symlink may even not exist, but by e.g. adding "systemd.unit=anaconda.target" +to kernel cmdline (see kernel-command-line(7) from systemd) + +`systemctl is-active -q anaconda.target` could be used here, but systemctl is not available from systemd generators. + +P.S. See https://www.redhat.com/archives/anaconda-devel-list/2012-August/msg00118.html for description why generator is needed. +--- + data/systemd/anaconda-generator | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/data/systemd/anaconda-generator b/data/systemd/anaconda-generator +index cda87480d..bafe74ea1 100755 +--- a/data/systemd/anaconda-generator ++++ b/data/systemd/anaconda-generator +@@ -5,7 +5,7 @@ + ANACONDA_TARGET="/lib/systemd/system/anaconda.target" + CURRENT_DEFAULT_TARGET=$(readlink /etc/systemd/system/default.target) + +-if [ "$ANACONDA_TARGET" != "$CURRENT_DEFAULT_TARGET" ]; then ++if ! { [ "$ANACONDA_TARGET" = "$CURRENT_DEFAULT_TARGET" ] || grep -q 'systemd.unit=anaconda.target' /proc/cmdline ;} ; then + exit 0 + fi + +-- +2.23.0 + diff --git a/bugfix-Reconfigure-DNF-payload-after-options-are-set.patch b/bugfix-Reconfigure-DNF-payload-after-options-are-set.patch new file mode 100644 index 0000000..c039241 --- /dev/null +++ b/bugfix-Reconfigure-DNF-payload-after-options-are-set.patch @@ -0,0 +1,29 @@ +From 2acb1357ee8b931662a1353a8956d5a0be176dec Mon Sep 17 00:00:00 2001 +From: Vladimir Slavik +Date: Tue, 23 Jun 2020 18:52:32 +0200 +Subject: [PATCH] Reconfigure DNF payload after options are set + +Fixes broken multilib boot option. 
+ +Resolves: rhbz#1847603 +--- + pyanaconda/payload/dnf/payload.py | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/pyanaconda/payload/dnf/payload.py b/pyanaconda/payload/dnf/payload.py +index f92720890..b38e78cd3 100644 +--- a/pyanaconda/payload/dnf/payload.py ++++ b/pyanaconda/payload/dnf/payload.py +@@ -173,6 +173,9 @@ class DNFPayload(Payload): + if opts.multiLib: + self.data.packages.multiLib = opts.multiLib + ++ # Reset all the other things now that we have new configuration. ++ self._configure() ++ + @property + def type(self): + """The DBus type of the payload.""" +-- +2.23.0 + diff --git a/bugfix-Reload-treeinfo-repositories-on-every-payload-reset.patch b/bugfix-Reload-treeinfo-repositories-on-every-payload-reset.patch new file mode 100644 index 0000000..3cdf27c --- /dev/null +++ b/bugfix-Reload-treeinfo-repositories-on-every-payload-reset.patch @@ -0,0 +1,120 @@ +From 6f27de8a38cc7900bb35a4fac4ec258f50207468 Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Thu, 23 Jul 2020 13:49:41 +0200 +Subject: [PATCH] Reload treeinfo repositories on every payload reset + +Remove old repositories before loading new ones. We are changing the logic to +load new ones every time the base repo is changed. + +This will solve problem that additional repositories pointing to an +invalid path. This is happening because we moved source mounting to a +different folders for each source. 
+ +Resolves: rhbz#1851207 +(cherry picked from commit 7b9e6c8ac29d56ed8b5d657ed6729cb5e239d09c) +--- + pyanaconda/payload/base.py | 8 +------- + pyanaconda/payload/dnf/payload.py | 29 +++++++++++++++++++---------- + 2 files changed, 20 insertions(+), 17 deletions(-) + +diff --git a/pyanaconda/payload/base.py b/pyanaconda/payload/base.py +index 285e27e05..5e56dbb29 100644 +--- a/pyanaconda/payload/base.py ++++ b/pyanaconda/payload/base.py +@@ -39,8 +39,6 @@ class Payload(metaclass=ABCMeta): + """ + self.data = data + +- self._first_payload_reset = True +- + # A list of verbose error strings from the subclass + self.verbose_errors = [] + +@@ -67,10 +65,6 @@ class Payload(metaclass=ABCMeta): + """The DBus type of the source.""" + return None + +- @property +- def first_payload_reset(self): +- return self._first_payload_reset +- + def is_ready(self): + """Is the payload ready?""" + return True +@@ -89,7 +83,7 @@ class Payload(metaclass=ABCMeta): + + This method could be overriden. + """ +- self._first_payload_reset = False ++ pass + + def release(self): + """Release any resources in use by this object, but do not do final +diff --git a/pyanaconda/payload/dnf/payload.py b/pyanaconda/payload/dnf/payload.py +index b693c776d..227d32c82 100644 +--- a/pyanaconda/payload/dnf/payload.py ++++ b/pyanaconda/payload/dnf/payload.py +@@ -1455,6 +1455,8 @@ class DNFPayload(Payload): + log.info("Configuring the base repo") + self.reset() + ++ self._cleanup_old_treeinfo_repositories() ++ + # Find the source and its type. 
+ source_proxy = self.get_source_proxy() + source_type = source_proxy.Type +@@ -1507,11 +1509,9 @@ class DNFPayload(Payload): + self._refresh_install_tree(data) + self._base.conf.releasever = self._get_release_version(install_tree_url) + base_repo_url = self._get_base_repo_location(install_tree_url) +- +- if self.first_payload_reset: +- self._add_treeinfo_repositories(install_tree_url, base_repo_url) +- + log.debug("releasever from %s is %s", base_repo_url, self._base.conf.releasever) ++ ++ self._load_treeinfo_repositories(base_repo_url) + except configparser.MissingSectionHeaderError as e: + log.error("couldn't set releasever from base repo (%s): %s", source_type, e) + +@@ -1817,12 +1817,11 @@ class DNFPayload(Payload): + log.debug("No base repository found in treeinfo file. Using installation tree root.") + return install_tree_url + +- def _add_treeinfo_repositories(self, install_tree_url, base_repo_url=None): +- """Add all repositories from treeinfo file which are not already loaded. ++ def _load_treeinfo_repositories(self, base_repo_url): ++ """Load new repositories from treeinfo file. + +- :param install_tree_url: Url to the installation tree root. +- :param base_repo_url: Base repository url. This is not saved anywhere when the function +- is called. It will be add to the existing urls if not None. ++ :param base_repo_url: base repository url. This is not saved anywhere when the function ++ is called. It will be add to the existing urls if not None. + """ + if self._install_tree_metadata: + existing_urls = [] +@@ -1843,9 +1842,19 @@ class DNFPayload(Payload): + repo = RepoData(name=repo_md.name, baseurl=repo_md.path, + install=False, enabled=repo_enabled) + repo.treeinfo_origin = True ++ log.debug("Adding new treeinfo repository %s", repo_md.name) + self.add_repo(repo) + +- return install_tree_url ++ def _cleanup_old_treeinfo_repositories(self): ++ """Remove all old treeinfo repositories before loading new ones. 
++ ++ Find all repositories added from treeinfo file and remove them. After this step new ++ repositories will be loaded from the new link. ++ """ ++ for ks_repo_name in self.addons: ++ if self.get_addon_repo(ks_repo_name).treeinfo_origin: ++ log.debug("Removing old treeinfo repository %s", ks_repo_name) ++ self.remove_repo(ks_repo_name) + + def _write_dnf_repo(self, repo, repo_path): + """Write a repo object to a DNF repo.conf file. +-- +2.23.0 + diff --git a/bugfix-Remove-treeinfo-repositories-instead-of-disabling.patch b/bugfix-Remove-treeinfo-repositories-instead-of-disabling.patch new file mode 100644 index 0000000..99df0c8 --- /dev/null +++ b/bugfix-Remove-treeinfo-repositories-instead-of-disabling.patch @@ -0,0 +1,128 @@ +From 6f52b733470d5565bc8e9a2a2415997d0ecbba54 Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Mon, 27 Jul 2020 18:08:30 +0200 +Subject: [PATCH] Remove treeinfo repositories instead of disabling + +We now remove repositories from treeinfo file on each payload reset. In that +case we should not disable these in the source spoke but instead remove them. + +We should ideally also show them back to user when the source is switched back +but that logic to detect if base repo repository have changed is not trivial and +we had to change _update_payload_repos to achieve this. We don't want to +introduce any more regressions and that code change could do that. 
+ +Related: rhbz#1851207 +(cherry picked from commit 8b99a20860a193d2816b53e89e562ba01bb1a825) +--- + .../ui/gui/spokes/installation_source.py | 44 ++++++++++++------- + 1 file changed, 28 insertions(+), 16 deletions(-) + +diff --git a/pyanaconda/ui/gui/spokes/installation_source.py b/pyanaconda/ui/gui/spokes/installation_source.py +index 5b2f56b80..0bd3b6938 100644 +--- a/pyanaconda/ui/gui/spokes/installation_source.py ++++ b/pyanaconda/ui/gui/spokes/installation_source.py +@@ -429,8 +429,6 @@ class SourceSpoke(NormalSpoke, GUISpokeInputCheckHandler, SourceSwitchHandler): + self._network_module = NETWORK.get_proxy() + self._device_tree = STORAGE.get_proxy(DEVICE_TREE) + +- self._treeinfo_repos_already_disabled = False +- + def apply(self): + source_changed = self._update_payload_source() + repo_changed = self._update_payload_repos() +@@ -1205,7 +1203,7 @@ class SourceSpoke(NormalSpoke, GUISpokeInputCheckHandler, SourceSwitchHandler): + # the newly enabled button as well as the previously enabled (now + # disabled) button. 
+ self._on_source_toggled(button, relatedBox) +- self._disable_treeinfo_repositories() ++ self._remove_treeinfo_repositories() + + def _on_source_toggled(self, button, relatedBox): + enabled = button.get_active() +@@ -1283,7 +1281,7 @@ class SourceSpoke(NormalSpoke, GUISpokeInputCheckHandler, SourceSwitchHandler): + button.set_label(os.path.basename(iso_file)) + button.set_use_underline(False) + self._verify_iso_button.set_sensitive(True) +- self._disable_treeinfo_repositories() ++ self._remove_treeinfo_repositories() + + def on_proxy_clicked(self, button): + dialog = ProxyDialog(self.data, self._proxy_url) +@@ -1331,7 +1329,7 @@ class SourceSpoke(NormalSpoke, GUISpokeInputCheckHandler, SourceSwitchHandler): + + def on_protocol_changed(self, combo): + self._on_protocol_changed() +- self._disable_treeinfo_repositories() ++ self._remove_treeinfo_repositories() + + def _on_protocol_changed(self): + # Only allow the URL entry to be used if we're using an HTTP/FTP +@@ -1431,8 +1429,6 @@ class SourceSpoke(NormalSpoke, GUISpokeInputCheckHandler, SourceSwitchHandler): + self._clear_repo_info() + self._repo_entry_box.set_sensitive(False) + +- self._treeinfo_repos_already_disabled = False +- + def _unique_repo_name(self, name): + """ Return a unique variation of the name if it already + exists in the repo store. 
+@@ -1491,13 +1487,16 @@ class SourceSpoke(NormalSpoke, GUISpokeInputCheckHandler, SourceSwitchHandler): + self._repo_store[repo_model_path][REPO_ENABLED_COL] = enabled + self._repo_store[repo_model_path][REPO_OBJ].enabled = enabled + +- def _disable_treeinfo_repositories(self): ++ def _remove_treeinfo_repositories(self): + """Disable all repositories loaded from the .treeinfo file""" +- if not self._treeinfo_repos_already_disabled: +- self._treeinfo_repos_already_disabled = True +- for repo_item in self._repo_store: +- if repo_item[REPO_OBJ].treeinfo_origin: +- self._set_repo_enabled(repo_item.path, False) ++ removal_repo_list = [] ++ ++ for repo_item in self._repo_store: ++ if repo_item[REPO_OBJ].treeinfo_origin: ++ removal_repo_list.append(repo_item.path) ++ ++ for path in removal_repo_list: ++ self._remove_repository(path) + + def _clear_repo_info(self): + """ Clear the text from the repo entry fields +@@ -1589,7 +1588,7 @@ class SourceSpoke(NormalSpoke, GUISpokeInputCheckHandler, SourceSwitchHandler): + def on_urlEntry_changed(self, editable, data=None): + # Check for and remove a URL prefix that matches the protocol dropdown + self._on_urlEtry_changed(editable) +- self._disable_treeinfo_repositories() ++ self._remove_treeinfo_repositories() + + def _on_urlEtry_changed(self, editable): + self._remove_url_prefix(editable, self._protocol_combo_box, self.on_urlEntry_changed) +@@ -1619,9 +1618,22 @@ class SourceSpoke(NormalSpoke, GUISpokeInputCheckHandler, SourceSwitchHandler): + self._repo_entry_box.set_sensitive(True) + + def on_removeRepo_clicked(self, button): +- """ Remove the selected repository ++ """Remove the selected repository""" ++ self._remove_repository() ++ ++ def _remove_repository(self, repo_model_path=None): ++ """Remove repository on repo_model_path or current selection. ++ ++ If repo_model_path is not specified then current selection will be used. 
++ ++ :param repo_model_path: repo_model_path of what we can remove or None ++ :type repo_model_path: repo_store repo_model_path + """ +- itr = self._repo_selection.get_selected()[1] ++ if repo_model_path is None: ++ itr = self._repo_store[repo_model_path].iter ++ else: ++ itr = self._repo_selection.get_selected()[1] ++ + if not itr: + return + +-- +2.23.0 + diff --git a/bugfix-Reset-the-RAID-level-of-the-device-request-1828092.patch b/bugfix-Reset-the-RAID-level-of-the-device-request-1828092.patch new file mode 100644 index 0000000..77d1da7 --- /dev/null +++ b/bugfix-Reset-the-RAID-level-of-the-device-request-1828092.patch @@ -0,0 +1,31 @@ +From d76c6060ea59215dbb90299b28fc8d59abf8e0fa Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Wed, 13 May 2020 18:59:26 +0200 +Subject: [PATCH] Reset the RAID level of the device request (#1828092) + +In the custom partitioning spoke, always reset the RAID level of the +device request when the device type changes. Otherwise, the new type +doesn't have to support the old RAID level. + +Resolves: rhbz#1828092 +--- + pyanaconda/ui/gui/spokes/custom_storage.py | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/pyanaconda/ui/gui/spokes/custom_storage.py b/pyanaconda/ui/gui/spokes/custom_storage.py +index 4e174a5f5..f0596263b 100644 +--- a/pyanaconda/ui/gui/spokes/custom_storage.py ++++ b/pyanaconda/ui/gui/spokes/custom_storage.py +@@ -1729,7 +1729,8 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + # this has to be done before calling populate_raid since it will need + # the raid level combo to contain the relevant raid levels for the new + # device type +- self._populate_raid() ++ self._request.device_raid_level = get_default_raid_level(new_type) ++ self._populate_raid(self._request.device_raid_level) + + # Generate a new container configuration for the new type. 
+ self._request = DeviceFactoryRequest.from_structure( +-- +2.23.0 + diff --git a/bugfix-Reset-the-state-of-the-custom-partitioning-spoke.patch b/bugfix-Reset-the-state-of-the-custom-partitioning-spoke.patch new file mode 100644 index 0000000..f79a45d --- /dev/null +++ b/bugfix-Reset-the-state-of-the-custom-partitioning-spoke.patch @@ -0,0 +1,134 @@ +From 8032e0a9eae5093e1246af4d3906ad65d5aade12 Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Wed, 12 Aug 2020 12:38:27 +0200 +Subject: [PATCH] Reset the state of the custom partitioning spoke + +We should reset the state of the custom partitioning spoke after every user +interaction. If we don't reset the _back_already_clicked attribute, the final +check of the storage configuration might be skipped and the status of the +storage spoke might show an error from the previously failed check. + +(cherry picked from commit 0aa3abfb6fb137746a393588ff9315807a65f7b9) +--- + pyanaconda/ui/gui/spokes/custom_storage.py | 28 +++++++++++----------- + 1 file changed, 14 insertions(+), 14 deletions(-) + +diff --git a/pyanaconda/ui/gui/spokes/custom_storage.py b/pyanaconda/ui/gui/spokes/custom_storage.py +index a5e568f..6c9f6f6 100644 +--- a/pyanaconda/ui/gui/spokes/custom_storage.py ++++ b/pyanaconda/ui/gui/spokes/custom_storage.py +@@ -280,7 +280,7 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + self._storage_module.ResetPartitioning() + + def refresh(self): +- self.clear_errors() ++ self.reset_state() + NormalSpoke.refresh(self) + + # Make sure the storage spoke execute method has finished before we +@@ -294,8 +294,6 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + self._partitioning = create_partitioning(PARTITIONING_METHOD_INTERACTIVE) + self._device_tree = STORAGE.get_proxy(self._partitioning.GetDeviceTree()) + +- self._back_already_clicked = False +- + # Get the name of the new installation. 
+ self._os_name = self._device_tree.GenerateSystemName() + +@@ -554,8 +552,7 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + # just-removed device + return + +- self.clear_errors() +- self._back_already_clicked = False ++ self.reset_state() + + log.debug("Saving the right side for device: %s", device_name) + +@@ -979,7 +976,7 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + + def on_add_clicked(self, button): + # Clear any existing errors +- self.clear_errors() ++ self.reset_state() + + # Save anything from the currently displayed mount point. + self._save_right_side(self._accordion.current_selector) +@@ -996,8 +993,6 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + dialog.window.destroy() + return + +- self._back_already_clicked = False +- + # Gather data about the added mount point. + request = DeviceFactoryRequest() + request.mount_point = dialog.mount_point +@@ -1006,7 +1001,7 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + request.disks = self._selected_disks + + # Clear errors and try to add the mountpoint/device. +- self.clear_errors() ++ self.reset_state() + + try: + self._device_tree.AddDevice( +@@ -1065,7 +1060,7 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + return + + # Remove selected items. +- self.clear_errors() ++ self.reset_state() + + try: + self._remove_selected_devices() +@@ -1187,7 +1182,7 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + # disk set management happens through container edit on RHS + return + +- self.clear_errors() ++ self.reset_state() + + dialog = DisksDialog( + self.data, +@@ -1448,7 +1443,7 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + + Note: There are never any non-existent devices around when this runs. + """ +- self.clear_errors() ++ self.reset_state() + + # Create the partitioning request. 
+ request = PartitioningRequest() +@@ -1762,6 +1757,10 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + self._error = None + self.clear_info() + ++ def reset_state(self): ++ self.clear_errors() ++ self._back_already_clicked = False ++ + # This callback is for the button that just resets the UI to anaconda's + # current understanding of the disk layout. + def on_reset_clicked(self, *args): +@@ -1846,7 +1845,7 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + return + + # Clear any existing errors +- self.clear_errors() ++ self.reset_state() + + # Save anything from the currently displayed mount point. + self._save_right_side(selector) +@@ -1861,7 +1860,8 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + if not selector: + return + +- self.clear_errors() ++ self.reset_state() ++ + device_name = selector.device_name + passphrase = self._passphraseEntry.get_text() + +-- +2.23.0 + diff --git a/bugfix-Root-password-is-mandatory-if-there-is-not-admin-use.patch b/bugfix-Root-password-is-mandatory-if-there-is-not-admin-use.patch new file mode 100644 index 0000000..8be445c --- /dev/null +++ b/bugfix-Root-password-is-mandatory-if-there-is-not-admin-use.patch @@ -0,0 +1,28 @@ +From 1f05aab7135ee3c5843c24a2a89bb707dcbe0dc5 Mon Sep 17 00:00:00 2001 +From: Radek Vykydal +Date: Thu, 17 Sep 2020 07:49:54 +0200 +Subject: [PATCH] Root password is mandatory if there is *not* admin user. 
+ +Related: rhbz#1876727 +--- + pyanaconda/ui/tui/spokes/root_password.py | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/pyanaconda/ui/tui/spokes/root_password.py b/pyanaconda/ui/tui/spokes/root_password.py +index 6ebc8f65e..345b5dcfe 100644 +--- a/pyanaconda/ui/tui/spokes/root_password.py ++++ b/pyanaconda/ui/tui/spokes/root_password.py +@@ -61,8 +61,8 @@ class PasswordSpoke(FirstbootSpokeMixIn, NormalTUISpoke): + + @property + def mandatory(self): +- """Root password spoke is mandatory if no users with admin rights have been requested.""" +- return self._users_module.CheckAdminUserExists() ++ """Only mandatory if no admin user has been requested.""" ++ return not self._users_module.CheckAdminUserExists() + + @property + def status(self): +-- +2.23.0 + diff --git a/bugfix-Run-actions-of-the-Resize-dialog-in-the-reversed-ord.patch b/bugfix-Run-actions-of-the-Resize-dialog-in-the-reversed-ord.patch new file mode 100644 index 0000000..a3a368b --- /dev/null +++ b/bugfix-Run-actions-of-the-Resize-dialog-in-the-reversed-ord.patch @@ -0,0 +1,86 @@ +From 2ca64adb8effcdfa8a883ee9f8fc2015cbece685 Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Tue, 28 Jul 2020 11:58:23 +0200 +Subject: [PATCH] Run actions of the Resize dialog in the reversed order + (#1856496) + +If there is a disk with two logical partitions sda5 and sda6 and we remove sda5, +Blivet renames the partition sda6 to sda5, so the actions for sda6 are no longer +valid. Run actions in the reversed order to avoid this situation. 
+ +Resolves: rhbz#1856496 +--- + pyanaconda/ui/gui/spokes/lib/resize.py | 29 +++++++++++++++++++++----- + 1 file changed, 24 insertions(+), 5 deletions(-) + +diff --git a/pyanaconda/ui/gui/spokes/lib/resize.py b/pyanaconda/ui/gui/spokes/lib/resize.py +index 71dcffb05..4695e5332 100644 +--- a/pyanaconda/ui/gui/spokes/lib/resize.py ++++ b/pyanaconda/ui/gui/spokes/lib/resize.py +@@ -19,6 +19,7 @@ + from collections import namedtuple + from blivet.size import Size + ++from pyanaconda.anaconda_loggers import get_module_logger + from pyanaconda.core.i18n import _, C_, N_, P_ + from pyanaconda.modules.common.constants.services import STORAGE + from pyanaconda.modules.common.structures.storage import OSData, DeviceData, DeviceFormatData +@@ -55,6 +56,8 @@ SHRINK = N_("Shrink") + DELETE = N_("Delete") + NOTHING = "" + ++log = get_module_logger(__name__) ++ + + class ResizeDialog(GUIObject): + builderObjects = ["actionStore", "diskStore", "resizeDialog", "resizeAdjustment"] +@@ -521,7 +524,8 @@ class ResizeDialog(GUIObject): + self._update_reclaim_button(self._selected_reclaimable_space) + self._update_action_buttons() + +- def _schedule_actions(self, model, path, itr, *args): ++ def _collect_actionable_rows(self, model, path, itr, rows): ++ """Collect rows that can be transformed into actions.""" + obj = PartStoreRow(*model[itr]) + + if not obj.name: +@@ -530,17 +534,32 @@ class ResizeDialog(GUIObject): + if not obj.editable: + return False + ++ rows.append(obj) ++ return False ++ ++ def _schedule_actions(self, obj): ++ """Schedule actions for the given row object.""" + if obj.action == _(PRESERVE): +- pass ++ log.debug("Preserve %s.", obj.name) + elif obj.action == _(SHRINK): ++ log.debug("Shrink %s to %s.", obj.name, Size(obj.target)) + self._device_tree.ShrinkDevice(obj.name, obj.target) + elif obj.action == _(DELETE): ++ log.debug("Remove %s.", obj.name) + self._device_tree.RemoveDevice(obj.name) + +- return False +- + def on_resize_clicked(self, *args): +- 
self._disk_store.foreach(self._schedule_actions, None) ++ rows = [] ++ ++ # Collect the rows. ++ self._disk_store.foreach(self._collect_actionable_rows, rows) ++ ++ # Process rows in the reversed order. If there is a disk with ++ # two logical partitions sda5 and sda6 and we remove sda5, Blivet ++ # renames the partition sda6 to sda5, so the actions for sda6 are ++ # no longer valid. See the bug 1856496. ++ for obj in reversed(rows): ++ self._schedule_actions(obj) + + def on_delete_all_clicked(self, button, *args): + if button.get_label() == C_("GUI|Reclaim Dialog", "Delete _all"): +-- +2.23.0 + diff --git a/bugfix-Schedule-timed-actions-with-the-right-selector-18516.patch b/bugfix-Schedule-timed-actions-with-the-right-selector-18516.patch new file mode 100644 index 0000000..09d1c06 --- /dev/null +++ b/bugfix-Schedule-timed-actions-with-the-right-selector-18516.patch @@ -0,0 +1,59 @@ +From 65b176260404087f625132697c1e299db0b0163e Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Wed, 8 Jul 2020 19:07:54 +0200 +Subject: [PATCH] Schedule timed actions with the right selector (#1851647) + +The timed actions of the Custom Partitioning spoke should be scheduled with +the specified selector. Otherwise, the action will use the current selector +that might be different. 
+ +Resolves: rhbz#1851647 +--- + pyanaconda/ui/gui/spokes/custom_storage.py | 20 ++++++++++++++++---- + 1 file changed, 16 insertions(+), 4 deletions(-) + +diff --git a/pyanaconda/ui/gui/spokes/custom_storage.py b/pyanaconda/ui/gui/spokes/custom_storage.py +index b89866c..a5e568f 100644 +--- a/pyanaconda/ui/gui/spokes/custom_storage.py ++++ b/pyanaconda/ui/gui/spokes/custom_storage.py +@@ -1836,21 +1836,33 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + dlg.run() + dlg.destroy() + +- @timed_action(delay=50, threshold=100) + def on_update_settings_clicked(self, button): ++ self._update_settings(self._accordion.current_selector) ++ ++ @timed_action(delay=50, threshold=100) ++ def _update_settings(self, selector): + """ call _save_right_side, then, perhaps, populate_right_side. """ ++ if not selector: ++ return ++ + # Clear any existing errors + self.clear_errors() + + # Save anything from the currently displayed mount point. +- self._save_right_side(self._accordion.current_selector) ++ self._save_right_side(selector) + self._applyButton.set_sensitive(False) + +- @timed_action(delay=50, threshold=100) + def on_unlock_clicked(self, *args): ++ self._unlock_device(self._accordion.current_selector) ++ ++ @timed_action(delay=50, threshold=100) ++ def _unlock_device(self, selector): + """ try to open the luks device, populate, then call _do_refresh. 
""" ++ if not selector: ++ return ++ + self.clear_errors() +- device_name = self._accordion.current_selector.device_name ++ device_name = selector.device_name + passphrase = self._passphraseEntry.get_text() + + log.info("Trying to unlock device %s.", device_name) +-- +2.23.0 + diff --git a/bugfix-Show-warning-message-when-entered-size-is-not-valid.patch b/bugfix-Show-warning-message-when-entered-size-is-not-valid.patch new file mode 100644 index 0000000..5bfd6b1 --- /dev/null +++ b/bugfix-Show-warning-message-when-entered-size-is-not-valid.patch @@ -0,0 +1,96 @@ +From 4bf4ba6d9a11cfd652ce48cdaea86bd617b6332a Mon Sep 17 00:00:00 2001 +From: Vladimir Slavik +Date: Tue, 28 Apr 2020 14:40:53 +0200 +Subject: [PATCH] Show warning message when entered size is not valid + +Requires also reordering checks such that the identical text test comes later. + +Resolves: rhbz#1809573 +--- + pyanaconda/ui/gui/spokes/custom_storage.py | 31 ++++++++++++++----- + .../gui/spokes/lib/custom_storage_helpers.py | 10 ++++++ + 2 files changed, 33 insertions(+), 8 deletions(-) + +diff --git a/pyanaconda/ui/gui/spokes/custom_storage.py b/pyanaconda/ui/gui/spokes/custom_storage.py +index 08e62cc40..f3755c48c 100644 +--- a/pyanaconda/ui/gui/spokes/custom_storage.py ++++ b/pyanaconda/ui/gui/spokes/custom_storage.py +@@ -63,7 +63,8 @@ from pyanaconda.ui.gui.spokes.lib.custom_storage_helpers import get_size_from_en + get_selected_raid_level, get_default_raid_level, get_container_type, AddDialog,\ + ConfirmDeleteDialog, DisksDialog, ContainerDialog, NOTEBOOK_LABEL_PAGE, NOTEBOOK_DETAILS_PAGE,\ + NOTEBOOK_LUKS_PAGE, NOTEBOOK_UNEDITABLE_PAGE, NOTEBOOK_INCOMPLETE_PAGE, NEW_CONTAINER_TEXT,\ +- CONTAINER_TOOLTIP, get_supported_device_raid_levels, generate_request_description ++ CONTAINER_TOOLTIP, DESIRED_CAPACITY_ERROR, get_supported_device_raid_levels, \ ++ generate_request_description + from pyanaconda.ui.gui.spokes.lib.passphrase import PassphraseDialog + from pyanaconda.ui.gui.spokes.lib.refresh 
import RefreshDialog + from pyanaconda.ui.gui.spokes.lib.summary import ActionSummaryDialog +@@ -1552,23 +1553,37 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + self._request.device_raid_level = get_selected_raid_level(self._raidLevelCombo) + self.on_value_changed() + +- def on_size_changed(self, widget): ++ @timed_action(750, 1500, False) ++ def on_size_changed(self, *args): ++ """Callback for text change in "desired capacity" widget""" + if not self._sizeEntry.get_sensitive(): + return + +- current_size = Size(self._request.device_size) +- displayed_size = current_size.human_readable(max_places=self.MAX_SIZE_PLACES) +- +- if displayed_size == self._sizeEntry.get_text(): +- return +- + size = get_size_from_entry( + self._sizeEntry, + lower_bound=self.MIN_SIZE_ENTRY, + units=SIZE_UNITS_DEFAULT + ) + ++ # Show warning if the size string is invalid. Field self._error is used as a "flag" that ++ # the last error was the same. This is done because this warning can fire on every change, ++ # so it would keep flickering at the bottom as you type. + if size is None: ++ if self._error != DESIRED_CAPACITY_ERROR: ++ self.clear_errors() ++ self.set_detailed_warning( ++ _("Invalid input. 
Specify the Desired Capacity in whole or decimal numbers, " ++ "with an appropriate unit."), ++ _(DESIRED_CAPACITY_ERROR) ++ ) ++ return ++ elif self._error == DESIRED_CAPACITY_ERROR: ++ self.clear_errors() ++ ++ current_size = Size(self._request.device_size) ++ displayed_size = current_size.human_readable(max_places=self.MAX_SIZE_PLACES) ++ ++ if displayed_size == self._sizeEntry.get_text(): + return + + self._request.device_size = size.get_bytes() +diff --git a/pyanaconda/ui/gui/spokes/lib/custom_storage_helpers.py b/pyanaconda/ui/gui/spokes/lib/custom_storage_helpers.py +index f7ae6cfa3..0dffaf0f7 100644 +--- a/pyanaconda/ui/gui/spokes/lib/custom_storage_helpers.py ++++ b/pyanaconda/ui/gui/spokes/lib/custom_storage_helpers.py +@@ -49,6 +49,16 @@ CONTAINER_TOOLTIP = N_("Create or select %(container_type)s") + CONTAINER_DIALOG_TITLE = N_("CONFIGURE %(container_type)s") + CONTAINER_DIALOG_TEXT = N_("Please create a name for this %(container_type)s " + "and select at least one disk below.") ++DESIRED_CAPACITY_ERROR = N_( ++ "Specify the Desired Capacity in whole or decimal numbers, with an appropriate unit.\n\n" ++ "Spaces separating digit groups are not allowed. Units consist of a decimal or binary " ++ "prefix, and optionally the letter B. Letter case does not matter for units. 
The default " ++ "unit used when units are left out is MiB.\n\n" ++ "Examples of valid input:\n" ++ "'100 GiB' = 100 gibibytes\n" ++ "'512m' = 512 megabytes\n" ++ "'123456789' = 123 terabytes and a bit less than a half\n" ++) + + ContainerType = namedtuple("ContainerType", ["name", "label"]) + +-- +2.23.0 + diff --git a/bugfix-Solve-the-problem-that-the-circular-loading-progress-bar-does-not-rotate.patch b/bugfix-Solve-the-problem-that-the-circular-loading-progress-bar-does-not-rotate.patch new file mode 100644 index 0000000..b9ea22e --- /dev/null +++ b/bugfix-Solve-the-problem-that-the-circular-loading-progress-bar-does-not-rotate.patch @@ -0,0 +1,25 @@ +From ccc28e983cd2c1f1f02fd00b9b1659fb572bac1b Mon Sep 17 00:00:00 2001 +From: yueyuankun +Date: Tue, 23 Aug 2022 15:53:18 +0800 +Subject: [PATCH] Solve the problem that sometimes the circular + loading progress bar does not rotate + +--- + pyanaconda/ui/gui/spokes/installation_progress.py | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/pyanaconda/ui/gui/spokes/installation_progress.py b/pyanaconda/ui/gui/spokes/installation_progress.py +index 0de742b..5ed3424 100644 +--- a/pyanaconda/ui/gui/spokes/installation_progress.py ++++ b/pyanaconda/ui/gui/spokes/installation_progress.py +@@ -85,6 +85,7 @@ class ProgressSpoke(StandaloneSpoke): + + if code == progressQ.PROGRESS_CODE_INIT: + self._init_progress_bar(args[0]) ++ gtk_call_once(self._spinner.start) + elif code == progressQ.PROGRESS_CODE_STEP: + self._step_progress_bar() + elif code == progressQ.PROGRESS_CODE_MESSAGE: +-- +2.27.0 + diff --git a/bugfix-The-underline-character-should-not-be-displayed.patch b/bugfix-The-underline-character-should-not-be-displayed.patch new file mode 100644 index 0000000..6035f9d --- /dev/null +++ b/bugfix-The-underline-character-should-not-be-displayed.patch @@ -0,0 +1,26 @@ +From 82ec245b104c4cc87f322b8824e04f22961c34d6 Mon Sep 17 00:00:00 2001 +From: Jan Stodola +Date: Mon, 31 Aug 2020 21:59:43 +0200 +Subject: [PATCH] The 
underline character should not be displayed + +Fix anaconda showing "1 _storage device selected" on the advanced storage spoke. +The call was accidentally? removed in 7b7616f +--- + pyanaconda/ui/gui/spokes/advanced_storage.py | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/pyanaconda/ui/gui/spokes/advanced_storage.py b/pyanaconda/ui/gui/spokes/advanced_storage.py +index 88405894e..e3ad33147 100644 +--- a/pyanaconda/ui/gui/spokes/advanced_storage.py ++++ b/pyanaconda/ui/gui/spokes/advanced_storage.py +@@ -632,6 +632,7 @@ class FilterSpoke(NormalSpoke): + if count > 0: + really_show(summary_button) + label.set_text(summary) ++ label.set_use_underline(True) + else: + really_hide(summary_button) + +-- +2.23.0 + diff --git a/bugfix-add-dnf-transaction-timeout.patch b/bugfix-add-dnf-transaction-timeout.patch new file mode 100644 index 0000000..542c810 --- /dev/null +++ b/bugfix-add-dnf-transaction-timeout.patch @@ -0,0 +1,52 @@ +From d1bb8d1d49de9668e8afc697aef8166d6c5bfabe Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Fri, 25 Sep 2020 23:16:04 +0800 +Subject: [PATCH] add dnf transaction timeout + +--- + pyanaconda/core/constants.py | 3 +++ + pyanaconda/payload/dnf/payload.py | 7 ++++++- + 2 files changed, 9 insertions(+), 1 deletion(-) + +diff --git a/pyanaconda/core/constants.py b/pyanaconda/core/constants.py +index 0e4cc15..607f96c 100644 +--- a/pyanaconda/core/constants.py ++++ b/pyanaconda/core/constants.py +@@ -448,6 +448,9 @@ URL_TYPE_BASEURL = "BASEURL" + URL_TYPE_MIRRORLIST = "MIRRORLIST" + URL_TYPE_METALINK = "METALINK" + ++#DNF trasactions timeout ++DNF_TRANSACTIONS_TIMEOUT = 1800 ++ + # The default source for the DNF payload. 
+ DNF_DEFAULT_SOURCE_TYPE = SOURCE_TYPE_CLOSEST_MIRROR + +diff --git a/pyanaconda/payload/dnf/payload.py b/pyanaconda/payload/dnf/payload.py +index f927208..08963cc 100644 +--- a/pyanaconda/payload/dnf/payload.py ++++ b/pyanaconda/payload/dnf/payload.py +@@ -19,6 +19,7 @@ + import configparser + import functools + import multiprocessing ++import queue + import os + import shutil + import sys +@@ -1356,7 +1357,11 @@ class DNFPayload(Payload): + if errors.errorHandler.cb(exc) == errors.ERROR_RAISE: + log.error("Installation failed: %r", exc) + go_to_failure_limbo() +- (token, msg) = queue_instance.get() ++ try: ++ (token, msg) = queue_instance.get(True, constants.DNF_TRANSACTIONS_TIMEOUT) ++ except queue.Empty: ++ msg = ("Payload error - DNF installation has timeouted") ++ raise PayloadError(msg) + + process.join() + # Don't close the mother base here, because we still need it. +-- +2.23.0 + diff --git a/bugfix-add-kdump-parameter-into-kernel-cmdline.patch b/bugfix-add-kdump-parameter-into-kernel-cmdline.patch new file mode 100644 index 0000000..6f2ec87 --- /dev/null +++ b/bugfix-add-kdump-parameter-into-kernel-cmdline.patch @@ -0,0 +1,42 @@ +From e8e0d299420138e95dae58fcd4b5669e72217947 Mon Sep 17 00:00:00 2001 +From: Jialong Chen +Date: Tue, 14 Jul 2020 21:45:01 +0800 +Subject: [PATCH] add kdump parameter into kernel cmdline + +--- + pyanaconda/modules/storage/bootloader/grub2.py | 12 ++++++++++-- + 1 file changed, 10 insertions(+), 2 deletions(-) + +diff --git a/pyanaconda/modules/storage/bootloader/grub2.py b/pyanaconda/modules/storage/bootloader/grub2.py +index c6b7db4..57fb2e5 100644 +--- a/pyanaconda/modules/storage/bootloader/grub2.py ++++ b/pyanaconda/modules/storage/bootloader/grub2.py +@@ -17,6 +17,7 @@ + # + import os + import re ++import blivet + from _ped import PARTITION_BIOS_GRUB + + from blivet.devicelibs import raid +@@ -266,8 +267,15 @@ class GRUB2(BootLoader): + # this is going to cause problems for systems containing multiple + # linux 
installations or even multiple boot entries with different + # boot arguments +- log.info("bootloader.py: used boot args: %s ", self.boot_args) +- defaults.write("GRUB_CMDLINE_LINUX=\"%s\"\n" % self.boot_args) ++ arg_str = "%s" % self.boot_args ++ if blivet.arch.is_aarch64(): ++ log.info("check boot args:%s", arg_str) ++ arg_str += " crashkernel=1024M,high" ++ else: ++ arg_str += " crashkernel=512M" ++ ++ log.info("bootloader.py: used boot args: %s ", arg_str) ++ defaults.write("GRUB_CMDLINE_LINUX=\"%s\"\n" % arg_str) + defaults.write("GRUB_DISABLE_RECOVERY=\"true\"\n") + #defaults.write("GRUB_THEME=\"/boot/grub2/themes/system/theme.txt\"\n") + +-- +1.8.3.1 + diff --git a/bugfix-add-tests-for-verify-valid-installtree-function.patch b/bugfix-add-tests-for-verify-valid-installtree-function.patch new file mode 100644 index 0000000..cae9609 --- /dev/null +++ b/bugfix-add-tests-for-verify-valid-installtree-function.patch @@ -0,0 +1,58 @@ +From 5dc9b2ee4dde7b6deb477b581759c5f76dcb87b5 Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Fri, 19 Jun 2020 15:57:57 +0200 +Subject: [PATCH] Add tests for verify_valid_installtree function (#1844287) + +It's testing if repodata/repomd.xml file exists. + +Related: rhbz#1844287 +Related: rhbz#1849093 +--- + .../module_source_base_test.py | 23 ++++++++++++++++++- + 1 file changed, 22 insertions(+), 1 deletion(-) + +diff --git a/tests/nosetests/pyanaconda_tests/module_source_base_test.py b/tests/nosetests/pyanaconda_tests/module_source_base_test.py +index f58e2b1639..3ba40edf4c 100644 +--- a/tests/nosetests/pyanaconda_tests/module_source_base_test.py ++++ b/tests/nosetests/pyanaconda_tests/module_source_base_test.py +@@ -16,6 +16,8 @@ + # Red Hat, Inc. 
+ # + import unittest ++from pathlib import Path ++from tempfile import TemporaryDirectory + from unittest.mock import patch + + from pyanaconda.core.constants import INSTALL_TREE +@@ -23,7 +25,8 @@ + from pyanaconda.modules.payloads.constants import SourceType + from pyanaconda.modules.payloads.source.mount_tasks import SetUpMountTask, TearDownMountTask + from pyanaconda.modules.payloads.source.source_base import MountingSourceMixin +-from pyanaconda.modules.payloads.source.utils import find_and_mount_iso_image ++from pyanaconda.modules.payloads.source.utils import find_and_mount_iso_image, \ ++ verify_valid_installtree + + mount_location = "/some/dir" + +@@ -184,3 +187,21 @@ def find_and_mount_iso_image_fail_mount_test(self, + ) + + self.assertEqual(iso_name, "") ++ ++ def verify_valid_installtree_success_test(self): ++ """Test verify_valid_installtree functionality success.""" ++ with TemporaryDirectory() as tmp: ++ repodir_path = Path(tmp, "repodata") ++ repodir_path.mkdir() ++ repomd_path = Path(repodir_path, "repomd.xml") ++ repomd_path.write_text("This is a cool repomd file!") ++ ++ self.assertTrue(verify_valid_installtree(tmp)) ++ ++ def verify_valid_installtree_failed_test(self): ++ """Test verify_valid_installtree functionality failed.""" ++ with TemporaryDirectory() as tmp: ++ repodir_path = Path(tmp, "repodata") ++ repodir_path.mkdir() ++ ++ self.assertFalse(verify_valid_installtree(tmp)) diff --git a/bugfix-do-not-mount-dbus-source.patch b/bugfix-do-not-mount-dbus-source.patch new file mode 100644 index 0000000..e91381b --- /dev/null +++ b/bugfix-do-not-mount-dbus-source.patch @@ -0,0 +1,84 @@ +From 5be52e7c3122634a1d7b011922356788315e22ec Mon Sep 17 00:00:00 2001 +From: root +Date: Sun, 25 Apr 2021 22:27:18 +0800 +Subject: [PATCH] patch + +--- + pyanaconda/core/constants.py | 1 + + pyanaconda/modules/payloads/source/cdrom/initialization.py | 3 ++- + pyanaconda/modules/payloads/source/utils.py | 4 ++-- + 
tests/nosetests/pyanaconda_tests/module_source_base_test.py | 3 +-- + 4 files changed, 6 insertions(+), 5 deletions(-) + +diff --git a/pyanaconda/core/constants.py b/pyanaconda/core/constants.py +index 4fc4b80..4724dc1 100644 +--- a/pyanaconda/core/constants.py ++++ b/pyanaconda/core/constants.py +@@ -53,6 +53,7 @@ DRACUT_ISODIR = "/run/install/source" + ISO_DIR = MOUNT_DIR + "/isodir" + IMAGE_DIR = MOUNT_DIR + "/image" + INSTALL_TREE = MOUNT_DIR + "/source" ++SOURCES_DIR = MOUNT_DIR + "/sources" + BASE_REPO_NAME = "anaconda" + + # Get list of repo names witch should be used as base repo +diff --git a/pyanaconda/modules/payloads/source/cdrom/initialization.py b/pyanaconda/modules/payloads/source/cdrom/initialization.py +index 7fc38fc..95303ea 100644 +--- a/pyanaconda/modules/payloads/source/cdrom/initialization.py ++++ b/pyanaconda/modules/payloads/source/cdrom/initialization.py +@@ -98,7 +98,8 @@ class SetUpCdromSourceTask(SetUpMountTask): + try: + device_data = DeviceData.from_structure(device_tree.GetDeviceData(dev_name)) + mount(device_data.path, self._target_mount, "iso9660", "ro") +- except PayloadSetupError: ++ except PayloadSetupError as e: ++ log.debug("Failed to mount %s: %s", dev_name, str(e)) + continue + + if is_valid_install_disk(self._target_mount): +diff --git a/pyanaconda/modules/payloads/source/utils.py b/pyanaconda/modules/payloads/source/utils.py +index a8e2f49..2dc4062 100644 +--- a/pyanaconda/modules/payloads/source/utils.py ++++ b/pyanaconda/modules/payloads/source/utils.py +@@ -20,7 +20,7 @@ import os.path + from blivet.arch import get_arch + from blivet.util import mount + +-from pyanaconda.core.constants import INSTALL_TREE ++from pyanaconda.core.constants import SOURCES_DIR + from pyanaconda.core.storage import device_matches + from pyanaconda.core.util import join_paths + from pyanaconda.payload.image import find_first_iso_image +@@ -177,7 +177,7 @@ class MountPointGenerator: + :rtype: str + """ + path = "{}/mount-{:0>4}-{}".format( +- 
INSTALL_TREE, ++ SOURCES_DIR, + cls._counter, + suffix + ) +diff --git a/tests/nosetests/pyanaconda_tests/module_source_base_test.py b/tests/nosetests/pyanaconda_tests/module_source_base_test.py +index c9f00fa..2d1a1da 100644 +--- a/tests/nosetests/pyanaconda_tests/module_source_base_test.py ++++ b/tests/nosetests/pyanaconda_tests/module_source_base_test.py +@@ -20,7 +20,6 @@ from pathlib import Path + from tempfile import TemporaryDirectory + from unittest.mock import patch + +-from pyanaconda.core.constants import INSTALL_TREE + from pyanaconda.modules.common.errors.payload import SourceSetupError, SourceTearDownError + from pyanaconda.modules.payloads.constants import SourceType + from pyanaconda.modules.payloads.source.mount_tasks import SetUpMountTask, TearDownMountTask +@@ -55,7 +54,7 @@ class MountingSourceMixinTestCase(unittest.TestCase): + def counter_test(self): + """Mount path in mount source base gets incremental numbers.""" + module = DummyMountingSourceSubclass() +- self.assertTrue(module.mount_point.startswith(INSTALL_TREE + "/mount-")) ++ self.assertTrue(module.mount_point.startswith("/run/install/sources/mount-")) + first_counter = int(module.mount_point.split("-")[1]) + + module = DummyMountingSourceSubclass() +-- +2.27.0 + diff --git a/bugfix-do-not-test-if-repo-is-valid-based-on-treeinfo-file.patch b/bugfix-do-not-test-if-repo-is-valid-based-on-treeinfo-file.patch new file mode 100644 index 0000000..2da320b --- /dev/null +++ b/bugfix-do-not-test-if-repo-is-valid-based-on-treeinfo-file.patch @@ -0,0 +1,54 @@ +From 6cee8e5a59a9c424d2bc79b5474a749c4f786b40 Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Fri, 19 Jun 2020 14:12:21 +0200 +Subject: [PATCH] Do not test if repo is valid based on .treeinfo file + (#1844287) + +Not all repositories need to have .treeinfo file. When it is not a compose but +only a third party repo it's probably created by just running createrepo_c which +does not create this file. 
We do not want to disable these repositories. + +So instead check that repodata/repomd.xml file is present. Based on my +discussion with DNF/RPM developers it seems like the best approach. + +Resolves: rhbz#1844287 +Resolves: rhbz#1849093 + +Reported-by: Adam Williamson +--- + pyanaconda/payload/image.py | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/pyanaconda/payload/image.py b/pyanaconda/payload/image.py +index b76b33db40..4b6d0c7bb9 100644 +--- a/pyanaconda/payload/image.py ++++ b/pyanaconda/payload/image.py +@@ -28,6 +28,7 @@ + from blivet.size import Size + + from pyanaconda import isys ++from pyanaconda.core.util import join_paths + from pyanaconda.errors import errorHandler, ERROR_RAISE, InvalidImageSizeError, MissingImageError + from pyanaconda.modules.common.constants.objects import DEVICE_TREE + from pyanaconda.modules.common.constants.services import STORAGE +@@ -129,16 +130,15 @@ def find_first_iso_image(path, mount_path="/mnt/install/cdimage"): + + + def verify_valid_installtree(path): +- """Check if the given path is a valid installtree repository ++ """Check if the given path is a valid installtree repository. + + :param str path: install tree path + :returns: True if repository is valid false otherwise + :rtype: bool + """ +- # TODO: This can be enhanced to check for repodata folder. 
+- if os.path.exists(os.path.join(path, ".treeinfo")): +- return True +- elif os.path.exists(os.path.join(path, "treeinfo")): ++ repomd_path = join_paths(path, "repodata/repomd.xml") ++ ++ if os.path.exists(repomd_path) and os.path.isfile(repomd_path): + return True + + return False diff --git a/bugfix-fix-password-policy.patch b/bugfix-fix-password-policy.patch new file mode 100644 index 0000000..0f98579 --- /dev/null +++ b/bugfix-fix-password-policy.patch @@ -0,0 +1,29 @@ +From 19264b192083d5cf38750a9cef0ec0a55eea3cfe Mon Sep 17 00:00:00 2001 +From: zhangqiumiao +Date: Thu, 10 Sep 2020 14:59:14 +0800 +Subject: [PATCH] bugfix fix password policy + +--- + data/interactive-defaults.ks | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/data/interactive-defaults.ks b/data/interactive-defaults.ks +index 0177cf9..7e43a39 100644 +--- a/data/interactive-defaults.ks ++++ b/data/interactive-defaults.ks +@@ -4,9 +4,9 @@ firstboot --enable + + %anaconda + # Default password policies +-pwpolicy root --notstrict --minlen=8 --minquality=1 --nochanges --notempty +-pwpolicy user --notstrict --minlen=8 --minquality=1 --nochanges --emptyok +-pwpolicy luks --notstrict --minlen=8 --minquality=1 --nochanges --notempty ++pwpolicy root --strict --minlen=8 --minquality=1 --nochanges --notempty ++pwpolicy user --strict --minlen=8 --minquality=1 --nochanges --emptyok ++pwpolicy luks --strict --minlen=8 --minquality=1 --nochanges --notempty + # NOTE: This applies only to *fully* interactive installations, partial kickstart + # installations use defaults specified in pyanaconda/pwpolicy.py. 
+ # Automated kickstart installs simply ignore the password policy as the policy +-- +1.8.3.1 + diff --git a/bugfix-logo-display-in-low-screen-resolution.patch b/bugfix-logo-display-in-low-screen-resolution.patch new file mode 100644 index 0000000..292e250 --- /dev/null +++ b/bugfix-logo-display-in-low-screen-resolution.patch @@ -0,0 +1,24 @@ +From 3e4a2bd7fd23c458a96b387c4df9f4abb984d59f Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Thu, 18 Jun 2020 22:30:28 +0800 +Subject: [PATCH] bugfix logo display in low screen resolution + +--- + data/anaconda-gtk.css | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/data/anaconda-gtk.css b/data/anaconda-gtk.css +index c47bb87..7b7166b 100644 +--- a/data/anaconda-gtk.css ++++ b/data/anaconda-gtk.css +@@ -113,6 +113,7 @@ infobar.error { + .logo { + background-image: url('/usr/share/anaconda/pixmaps/sidebar-logo.png'); + background-position: 50% 20px; ++ background-size: 90%; + background-repeat: no-repeat; + background-color: transparent; + } +-- +2.23.0 + diff --git a/bugfix-move-verify-valid-installtree-to-source-module-utils.patch b/bugfix-move-verify-valid-installtree-to-source-module-utils.patch new file mode 100644 index 0000000..204a867 --- /dev/null +++ b/bugfix-move-verify-valid-installtree-to-source-module-utils.patch @@ -0,0 +1,107 @@ +From fea8f2db7594482457f1a7f7aebb7ccac4505fa5 Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Fri, 19 Jun 2020 15:36:37 +0200 +Subject: [PATCH] Move verify_valid_installtree to source module utils + (#1844287) + +It's used only by modules now. We can safely move it. 
+ +Related: rhbz#1844287 +Related: rhbz#1849093 +--- + .../payloads/source/harddrive/initialization.py | 3 +-- + .../payloads/source/nfs/initialization.py | 4 ++-- + pyanaconda/modules/payloads/source/utils.py | 15 +++++++++++++++ + pyanaconda/payload/image.py | 16 ---------------- + 4 files changed, 18 insertions(+), 20 deletions(-) + +diff --git a/pyanaconda/modules/payloads/source/harddrive/initialization.py b/pyanaconda/modules/payloads/source/harddrive/initialization.py +index 38d777adca..ed77db6bc9 100644 +--- a/pyanaconda/modules/payloads/source/harddrive/initialization.py ++++ b/pyanaconda/modules/payloads/source/harddrive/initialization.py +@@ -22,8 +22,7 @@ + from pyanaconda.modules.common.errors.payload import SourceSetupError + from pyanaconda.modules.common.task import Task + from pyanaconda.modules.payloads.source.utils import find_and_mount_device, \ +- find_and_mount_iso_image +-from pyanaconda.payload.image import verify_valid_installtree ++ find_and_mount_iso_image, verify_valid_installtree + from pyanaconda.payload.utils import unmount + from pyanaconda.anaconda_loggers import get_module_logger + +diff --git a/pyanaconda/modules/payloads/source/nfs/initialization.py b/pyanaconda/modules/payloads/source/nfs/initialization.py +index 00112c3ecb..56e95060c6 100644 +--- a/pyanaconda/modules/payloads/source/nfs/initialization.py ++++ b/pyanaconda/modules/payloads/source/nfs/initialization.py +@@ -21,9 +21,9 @@ + from pyanaconda.core.payload import parse_nfs_url + from pyanaconda.modules.common.errors.payload import SourceSetupError + from pyanaconda.modules.common.task import Task +-from pyanaconda.modules.payloads.source.utils import find_and_mount_iso_image ++from pyanaconda.modules.payloads.source.utils import find_and_mount_iso_image, \ ++ verify_valid_installtree + from pyanaconda.payload.errors import PayloadSetupError +-from pyanaconda.payload.image import verify_valid_installtree + from pyanaconda.payload.utils import mount, unmount + + log = 
get_module_logger(__name__) +diff --git a/pyanaconda/modules/payloads/source/utils.py b/pyanaconda/modules/payloads/source/utils.py +index b9642a945c..ed9e5da49b 100644 +--- a/pyanaconda/modules/payloads/source/utils.py ++++ b/pyanaconda/modules/payloads/source/utils.py +@@ -148,6 +148,21 @@ def _create_iso_path(path, iso_name): + return path + + ++def verify_valid_installtree(path): ++ """Check if the given path is a valid installtree repository. ++ ++ :param str path: install tree path ++ :returns: True if repository is valid false otherwise ++ :rtype: bool ++ """ ++ repomd_path = join_paths(path, "repodata/repomd.xml") ++ ++ if os.path.exists(repomd_path) and os.path.isfile(repomd_path): ++ return True ++ ++ return False ++ ++ + class MountPointGenerator: + _counter = 0 + +diff --git a/pyanaconda/payload/image.py b/pyanaconda/payload/image.py +index 4b6d0c7bb9..9401e29388 100644 +--- a/pyanaconda/payload/image.py ++++ b/pyanaconda/payload/image.py +@@ -28,7 +28,6 @@ + from blivet.size import Size + + from pyanaconda import isys +-from pyanaconda.core.util import join_paths + from pyanaconda.errors import errorHandler, ERROR_RAISE, InvalidImageSizeError, MissingImageError + from pyanaconda.modules.common.constants.objects import DEVICE_TREE + from pyanaconda.modules.common.constants.services import STORAGE +@@ -129,21 +128,6 @@ def find_first_iso_image(path, mount_path="/mnt/install/cdimage"): + return None + + +-def verify_valid_installtree(path): +- """Check if the given path is a valid installtree repository. 
+- +- :param str path: install tree path +- :returns: True if repository is valid false otherwise +- :rtype: bool +- """ +- repomd_path = join_paths(path, "repodata/repomd.xml") +- +- if os.path.exists(repomd_path) and os.path.isfile(repomd_path): +- return True +- +- return False +- +- + def _check_repodata(mount_path): + install_tree_meta = InstallTreeMetadata() + if not install_tree_meta.load_file(mount_path): diff --git a/bugfix-network-add-timeout-for-synchronous-activation-of-a-.patch b/bugfix-network-add-timeout-for-synchronous-activation-of-a-.patch new file mode 100644 index 0000000..bcf8896 --- /dev/null +++ b/bugfix-network-add-timeout-for-synchronous-activation-of-a-.patch @@ -0,0 +1,59 @@ +From 7ab2db63f7d5f30035d6db2ec2a86a156c50d2f6 Mon Sep 17 00:00:00 2001 +From: Radek Vykydal +Date: Wed, 19 Aug 2020 13:50:48 +0200 +Subject: [PATCH] network: add timeout for synchronous activation of a + connection + +Related: rhbz#1869323 +--- + pyanaconda/modules/network/constants.py | 1 + + pyanaconda/modules/network/nm_client.py | 13 ++++++++++--- + 2 files changed, 11 insertions(+), 3 deletions(-) + +diff --git a/pyanaconda/modules/network/constants.py b/pyanaconda/modules/network/constants.py +index 33c99d76e..530a8e281 100644 +--- a/pyanaconda/modules/network/constants.py ++++ b/pyanaconda/modules/network/constants.py +@@ -24,6 +24,7 @@ from pyanaconda.core.constants import FIREWALL_DEFAULT, FIREWALL_DISABLED, \ + + + NM_CONNECTION_UUID_LENGTH = 36 ++CONNECTION_ACTIVATION_TIMEOUT = 45 + + + @unique +diff --git a/pyanaconda/modules/network/nm_client.py b/pyanaconda/modules/network/nm_client.py +index 5e1fb854e..2f5703e76 100644 +--- a/pyanaconda/modules/network/nm_client.py ++++ b/pyanaconda/modules/network/nm_client.py +@@ -23,9 +23,10 @@ gi.require_version("NM", "1.0") + from gi.repository import NM + + import socket +-from queue import Queue ++from queue import Queue, Empty + from pykickstart.constants import BIND_TO_MAC +-from 
pyanaconda.modules.network.constants import NM_CONNECTION_UUID_LENGTH ++from pyanaconda.modules.network.constants import NM_CONNECTION_UUID_LENGTH, \ ++ CONNECTION_ACTIVATION_TIMEOUT + from pyanaconda.modules.network.kickstart import default_ks_vlan_interface_name + from pyanaconda.modules.network.utils import is_s390, get_s390_settings, netmask2prefix, \ + prefix2netmask +@@ -939,7 +940,13 @@ def activate_connection_sync(nm_client, connection, device): + sync_queue + ) + +- return sync_queue.get() ++ try: ++ ret = sync_queue.get(timeout=CONNECTION_ACTIVATION_TIMEOUT) ++ except Empty: ++ log.error("Activation of a connection timed out.") ++ ret = None ++ ++ return ret + + + def get_dracut_arguments_from_connection(nm_client, connection, iface, target_ip, +-- +2.23.0 + diff --git a/bugfix-network-do-not-crash-when-updating-a-connection-with.patch b/bugfix-network-do-not-crash-when-updating-a-connection-with.patch new file mode 100644 index 0000000..5ac9b42 --- /dev/null +++ b/bugfix-network-do-not-crash-when-updating-a-connection-with.patch @@ -0,0 +1,41 @@ +From a3e46c49216f76f73097587b15ded52b253ce3d2 Mon Sep 17 00:00:00 2001 +From: Radek Vykydal +Date: Wed, 26 Aug 2020 09:52:24 +0200 +Subject: [PATCH] network: do not crash when updating a connection without + wired settings + +One of the Anaconda fallouts of NM defaulting to keyfiles. +Hit by team-pre and five other -pre kickstart tests. 
+--- + pyanaconda/modules/network/initialization.py | 15 ++++++++------- + 1 file changed, 8 insertions(+), 7 deletions(-) + +diff --git a/pyanaconda/modules/network/initialization.py b/pyanaconda/modules/network/initialization.py +index 5e33d0494..b27a46976 100644 +--- a/pyanaconda/modules/network/initialization.py ++++ b/pyanaconda/modules/network/initialization.py +@@ -411,13 +411,14 @@ class DumpMissingIfcfgFilesTask(Task): + s_con.set_property(NM.SETTING_CONNECTION_ID, iface) + s_con.set_property(NM.SETTING_CONNECTION_INTERFACE_NAME, iface) + s_wired = con.get_setting_wired() +- # By default connections are bound to interface name +- s_wired.set_property(NM.SETTING_WIRED_MAC_ADDRESS, None) +- bound_mac = bound_hwaddr_of_device(self._nm_client, iface, self._ifname_option_values) +- if bound_mac: +- s_wired.set_property(NM.SETTING_WIRED_MAC_ADDRESS, bound_mac) +- log.debug("%s: iface %s bound to mac address %s by ifname boot option", +- self.name, iface, bound_mac) ++ if s_wired: ++ # By default connections are bound to interface name ++ s_wired.set_property(NM.SETTING_WIRED_MAC_ADDRESS, None) ++ bound_mac = bound_hwaddr_of_device(self._nm_client, iface, self._ifname_option_values) ++ if bound_mac: ++ s_wired.set_property(NM.SETTING_WIRED_MAC_ADDRESS, bound_mac) ++ log.debug("%s: iface %s bound to mac address %s by ifname boot option", ++ self.name, iface, bound_mac) + + @guard_by_system_configuration(return_value=[]) + def run(self): +-- +2.23.0 + diff --git a/bugfix-network-do-not-try-to-activate-connection-that-has-n.patch b/bugfix-network-do-not-try-to-activate-connection-that-has-n.patch new file mode 100644 index 0000000..cafc43a --- /dev/null +++ b/bugfix-network-do-not-try-to-activate-connection-that-has-n.patch @@ -0,0 +1,30 @@ +From 6ffa54ed7ca56047b26f05cc6b9967820fa5dc21 Mon Sep 17 00:00:00 2001 +From: Radek Vykydal +Date: Wed, 19 Aug 2020 13:51:14 +0200 +Subject: [PATCH] network: do not try to activate connection that has not been + found + 
+Resolves: rhbz#1869323 +--- + pyanaconda/modules/network/nm_client.py | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/pyanaconda/modules/network/nm_client.py b/pyanaconda/modules/network/nm_client.py +index 2f5703e76..acf6f7858 100644 +--- a/pyanaconda/modules/network/nm_client.py ++++ b/pyanaconda/modules/network/nm_client.py +@@ -785,8 +785,9 @@ def ensure_active_connection_for_device(nm_client, uuid, device_name, only_repla + active_uuid = ac.get_uuid() if ac else None + if uuid != active_uuid: + ifcfg_con = nm_client.get_connection_by_uuid(uuid) +- activate_connection_sync(nm_client, ifcfg_con, None) +- activated = True ++ if ifcfg_con: ++ activate_connection_sync(nm_client, ifcfg_con, None) ++ activated = True + msg = "activated" if activated else "not activated" + log.debug("ensure active ifcfg connection for %s (%s -> %s): %s", + device_name, active_uuid, uuid, msg) +-- +2.23.0 + diff --git a/bugfix-network-fix-configuration-of-virtual-devices-by-boot.patch b/bugfix-network-fix-configuration-of-virtual-devices-by-boot.patch new file mode 100644 index 0000000..0455f8d --- /dev/null +++ b/bugfix-network-fix-configuration-of-virtual-devices-by-boot.patch @@ -0,0 +1,58 @@ +From 787daf49b358fbe2d514012a708c28575fc8122b Mon Sep 17 00:00:00 2001 +From: Radek Vykydal +Date: Wed, 1 Jul 2020 11:25:37 +0200 +Subject: [PATCH] network: fix configuration of virtual devices by boot options + +The configuration was not passed to installed system via ifcfg files. 
+ +Resolves: rhbz#1851218 +--- + pyanaconda/modules/network/initialization.py | 17 ++++++++++++----- + 1 file changed, 12 insertions(+), 5 deletions(-) + +diff --git a/pyanaconda/modules/network/initialization.py b/pyanaconda/modules/network/initialization.py +index de1ec851b..5e33d0494 100644 +--- a/pyanaconda/modules/network/initialization.py ++++ b/pyanaconda/modules/network/initialization.py +@@ -26,7 +26,8 @@ from pyanaconda.modules.network.nm_client import get_device_name_from_network_da + update_connection_values, commit_changes_with_autoconnection_blocked, is_ibft_connection + from pyanaconda.modules.network.ifcfg import get_ifcfg_file_of_device, find_ifcfg_uuid_of_device, \ + get_master_slaves_from_ifcfgs +-from pyanaconda.modules.network.device_configuration import supported_wired_device_types ++from pyanaconda.modules.network.device_configuration import supported_wired_device_types, \ ++ virtual_device_types + from pyanaconda.modules.network.utils import guard_by_system_configuration + + log = get_module_logger(__name__) +@@ -431,8 +432,9 @@ class DumpMissingIfcfgFilesTask(Task): + log.debug("%s: No NetworkManager available.", self.name) + return new_ifcfgs + ++ dumped_device_types = supported_wired_device_types + virtual_device_types + for device in self._nm_client.get_devices(): +- if device.get_device_type() not in supported_wired_device_types: ++ if device.get_device_type() not in dumped_device_types: + continue + + iface = device.get_iface() +@@ -446,9 +448,14 @@ class DumpMissingIfcfgFilesTask(Task): + + device_is_slave = any(con.get_setting_connection().get_master() for con in cons) + if device_is_slave: +- log.debug("%s: not creating default connection for slave device %s", +- self.name, iface) +- continue ++ # We have to dump persistent ifcfg files for slaves created in initramfs ++ if n_cons == 1 and self._is_initramfs_connection(cons[0], iface): ++ log.debug("%s: device %s has an initramfs slave connection", ++ self.name, iface) ++ else: ++ 
log.debug("%s: not creating default connection for slave device %s", ++ self.name, iface) ++ continue + + # Devices activated in initramfs should have ONBOOT=yes + has_initramfs_con = any(self._is_initramfs_connection(con, iface) for con in cons) +-- +2.23.0 + diff --git a/bugfix-network-fix-parsing-of-hostname-from-ip-if-mac-is-de.patch b/bugfix-network-fix-parsing-of-hostname-from-ip-if-mac-is-de.patch new file mode 100644 index 0000000..1420341 --- /dev/null +++ b/bugfix-network-fix-parsing-of-hostname-from-ip-if-mac-is-de.patch @@ -0,0 +1,95 @@ +From 0b4867eba60bbee4b8e1c1bd58966691dd1c2431 Mon Sep 17 00:00:00 2001 +From: Radek Vykydal +Date: Wed, 5 Aug 2020 16:35:34 +0200 +Subject: [PATCH] network: fix parsing of hostname from ip= if mac is defined + in dhcp + +Resolves: rhbz#1852560 +--- + pyanaconda/core/regexes.py | 3 +++ + pyanaconda/network.py | 13 ++++++++----- + tests/nosetests/pyanaconda_tests/network_test.py | 11 ++++++++--- + 3 files changed, 19 insertions(+), 8 deletions(-) + +diff --git a/pyanaconda/core/regexes.py b/pyanaconda/core/regexes.py +index 63ab668c9..ee5cc3765 100644 +--- a/pyanaconda/core/regexes.py ++++ b/pyanaconda/core/regexes.py +@@ -191,3 +191,6 @@ ZFCP_WWPN_NUMBER = re.compile(r'^(?:0x|)[0-9A-Fa-f]{16}$') + + # IPv6 address in dracut IP option (including the square brackets) + IPV6_ADDRESS_IN_DRACUT_IP_OPTION = re.compile(r'\[[^\]]+\]') ++ ++# Octet of MAC address ++MAC_OCTET = re.compile(r'[a-fA-F0-9][a-fA-F0-9]') +diff --git a/pyanaconda/network.py b/pyanaconda/network.py +index c66f35d44..7ba821fe4 100644 +--- a/pyanaconda/network.py ++++ b/pyanaconda/network.py +@@ -32,7 +32,7 @@ from pyanaconda.core import util, constants + from pyanaconda.core.i18n import _ + from pyanaconda.core.kernel import kernel_arguments + from pyanaconda.core.regexes import HOSTNAME_PATTERN_WITHOUT_ANCHORS, \ +- IPV6_ADDRESS_IN_DRACUT_IP_OPTION ++ IPV6_ADDRESS_IN_DRACUT_IP_OPTION, MAC_OCTET + from pyanaconda.core.configuration.anaconda import conf + 
from pyanaconda.core.constants import TIME_SOURCE_SERVER + from pyanaconda.modules.common.constants.services import NETWORK, TIMEZONE, STORAGE +@@ -209,7 +209,7 @@ def hostname_from_cmdline(kernel_args): + """ + # legacy hostname= option + hostname = kernel_args.get('hostname', "") +- # ip= option ++ # ip= option (man dracut.cmdline) + ipopts = kernel_args.get('ip') + # Example (2 options): + # ens3:dhcp 10.34.102.244::10.34.102.54:255.255.255.0:myhostname:ens9:none +@@ -219,10 +219,13 @@ def hostname_from_cmdline(kernel_args): + # Replace ipv6 addresses with empty string, example of ipv6 config: + # [fd00:10:100::84:5]::[fd00:10:100::86:49]:80:myhostname:ens9:none + ipopt = IPV6_ADDRESS_IN_DRACUT_IP_OPTION.sub('', ipopt) +- try: ++ elements = ipopt.split(':') ++ # Hostname can be defined only in option having more than 6 elements. ++ # But filter out auto ip= with mac address set by MAC_OCTET matching, eg: ++ # ip=:dhcp::52:54:00:12:34:56 ++ # where the 4th element is not hostname. ++ if len(elements) > 6 and not re.match(MAC_OCTET, elements[6]): + hostname = ipopt.split(':')[4] +- except IndexError: +- pass + return hostname + + +diff --git a/tests/nosetests/pyanaconda_tests/network_test.py b/tests/nosetests/pyanaconda_tests/network_test.py +index e7ca630a7..161e883ff 100644 +--- a/tests/nosetests/pyanaconda_tests/network_test.py ++++ b/tests/nosetests/pyanaconda_tests/network_test.py +@@ -233,9 +233,11 @@ class NetworkTests(unittest.TestCase): + cmdline = {"ip": "10.34.102.244::10.34.102.54:255.255.255.0:myhostname:ens9:none", + "hostname": "hostname_bootopt"} + self.assertEqual(network.hostname_from_cmdline(cmdline), "myhostname") +- cmdline = {"ip": "ens3:dhcp "} ++ cmdline = {"ip": "ens3:dhcp"} + self.assertEqual(network.hostname_from_cmdline(cmdline), "") +- cmdline = {"ip": "ens3:dhcp ", ++ cmdline = {"ip": "ens3:dhcp:1500"} ++ self.assertEqual(network.hostname_from_cmdline(cmdline), "") ++ cmdline = {"ip": "ens3:dhcp", + "hostname": "hostname_bootopt"} + 
self.assertEqual(network.hostname_from_cmdline(cmdline), "hostname_bootopt") + # two ip configurations +@@ -248,6 +250,9 @@ class NetworkTests(unittest.TestCase): + self.assertEqual(network.hostname_from_cmdline(cmdline), "myhostname") + cmdline = {"ip": "[fd00:10:100::84:5]::[fd00:10:100::86:49]:80::ens50:none"} + self.assertEqual(network.hostname_from_cmdline(cmdline), "") +- cmdline = {"ip": "[fd00:10:100::84:5]::[fd00:10:100::86:49]:80::ens50:none" ++ cmdline = {"ip": "[fd00:10:100::84:5]::[fd00:10:100::86:49]:80::ens50:none " + "ens3:dhcp 10.34.102.244::10.34.102.54:255.255.255.0:myhostname:ens9:none"} + self.assertEqual(network.hostname_from_cmdline(cmdline), "myhostname") ++ # automatic ip= whith MAC address set ++ cmdline = {"ip": "ens3:dhcp::52:54:00:12:34:56"} ++ self.assertEqual(network.hostname_from_cmdline(cmdline), "") +-- +2.23.0 + diff --git a/bugfix-network-get-hwadddr-when-binding-to-mac-more-robustl.patch b/bugfix-network-get-hwadddr-when-binding-to-mac-more-robustl.patch new file mode 100644 index 0000000..d202dd9 --- /dev/null +++ b/bugfix-network-get-hwadddr-when-binding-to-mac-more-robustl.patch @@ -0,0 +1,29 @@ +From 72c5df1cb3ee8636fc6901aaf6192f7e147ed399 Mon Sep 17 00:00:00 2001 +From: Radek Vykydal +Date: Wed, 9 Sep 2020 15:36:35 +0200 +Subject: [PATCH] network: get hwadddr when binding to mac more robustly + +--- + pyanaconda/modules/network/nm_client.py | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/pyanaconda/modules/network/nm_client.py b/pyanaconda/modules/network/nm_client.py +index 4473c0110..04ae78d90 100644 +--- a/pyanaconda/modules/network/nm_client.py ++++ b/pyanaconda/modules/network/nm_client.py +@@ -695,7 +695,11 @@ def bind_settings_to_mac(nm_client, s_connection, s_wired, device_name=None, bin + return False + device = nm_client.get_device_by_iface(iface) + if device: +- hwaddr = device.get_permanent_hw_address() or device.get_hw_address() ++ try: ++ perm_hwaddr = 
device.get_permanent_hw_address() ++ except AttributeError: ++ perm_hwaddr = None ++ hwaddr = perm_hwaddr or device.get_hw_address() + s_wired.props.mac_address = hwaddr + log.debug("Bind to mac: bound to %s", hwaddr) + modified = True +-- +2.23.0 + diff --git a/bugfix-rename-function-for-a-simple-check-for-DNF-repository.patch b/bugfix-rename-function-for-a-simple-check-for-DNF-repository.patch new file mode 100644 index 0000000..1b13069 --- /dev/null +++ b/bugfix-rename-function-for-a-simple-check-for-DNF-repository.patch @@ -0,0 +1,228 @@ +From 92d8d9d3e39eae8e268e3aeff096105b441bbeae Mon Sep 17 00:00:00 2001 +From: Jiri Konecny +Date: Mon, 22 Jun 2020 13:12:53 +0200 +Subject: [PATCH] Rename function for a simple check for DNF repository + +It's more clear to use repository in the name than installtree. + +Related: rhbz#1844287 +Related: rhbz#1849093 +--- + .../payloads/source/harddrive/initialization.py | 4 ++-- + .../modules/payloads/source/nfs/initialization.py | 4 ++-- + pyanaconda/modules/payloads/source/utils.py | 6 +++--- + .../pyanaconda_tests/module_source_base_test.py | 14 +++++++------- + .../module_source_harddrive_test.py | 12 ++++++------ + .../pyanaconda_tests/module_source_nfs_test.py | 12 ++++++------ + 6 files changed, 26 insertions(+), 26 deletions(-) + +diff --git a/pyanaconda/modules/payloads/source/harddrive/initialization.py b/pyanaconda/modules/payloads/source/harddrive/initialization.py +index ed77db6bc9..004df4f034 100644 +--- a/pyanaconda/modules/payloads/source/harddrive/initialization.py ++++ b/pyanaconda/modules/payloads/source/harddrive/initialization.py +@@ -22,7 +22,7 @@ + from pyanaconda.modules.common.errors.payload import SourceSetupError + from pyanaconda.modules.common.task import Task + from pyanaconda.modules.payloads.source.utils import find_and_mount_device, \ +- find_and_mount_iso_image, verify_valid_installtree ++ find_and_mount_iso_image, verify_valid_repository + from pyanaconda.payload.utils import unmount + from 
pyanaconda.anaconda_loggers import get_module_logger + +@@ -82,7 +82,7 @@ def run(self): + log.debug("Using the ISO '%s' mounted at '%s'.", iso_name, self._iso_mount) + return SetupHardDriveResult(self._iso_mount, iso_name) + +- if verify_valid_installtree(full_path_on_mounted_device): ++ if verify_valid_repository(full_path_on_mounted_device): + log.debug("Using the directory at '%s'.", full_path_on_mounted_device) + return SetupHardDriveResult(full_path_on_mounted_device, "") + +diff --git a/pyanaconda/modules/payloads/source/nfs/initialization.py b/pyanaconda/modules/payloads/source/nfs/initialization.py +index 56e95060c6..99601bf325 100644 +--- a/pyanaconda/modules/payloads/source/nfs/initialization.py ++++ b/pyanaconda/modules/payloads/source/nfs/initialization.py +@@ -22,7 +22,7 @@ + from pyanaconda.modules.common.errors.payload import SourceSetupError + from pyanaconda.modules.common.task import Task + from pyanaconda.modules.payloads.source.utils import find_and_mount_iso_image, \ +- verify_valid_installtree ++ verify_valid_repository + from pyanaconda.payload.errors import PayloadSetupError + from pyanaconda.payload.utils import mount, unmount + +@@ -65,7 +65,7 @@ def run(self): + log.debug("Using the ISO '%s' mounted at '%s'.", iso_name, self._iso_mount) + return self._iso_mount + +- if verify_valid_installtree(self._device_mount): ++ if verify_valid_repository(self._device_mount): + log.debug("Using the directory at '%s'.", self._device_mount) + return self._device_mount + +diff --git a/pyanaconda/modules/payloads/source/utils.py b/pyanaconda/modules/payloads/source/utils.py +index ed9e5da49b..84cdd33ca8 100644 +--- a/pyanaconda/modules/payloads/source/utils.py ++++ b/pyanaconda/modules/payloads/source/utils.py +@@ -148,10 +148,10 @@ def _create_iso_path(path, iso_name): + return path + + +-def verify_valid_installtree(path): +- """Check if the given path is a valid installtree repository. 
++def verify_valid_repository(path): ++ """Check if the given path is a valid repository. + +- :param str path: install tree path ++ :param str path: path to the repository + :returns: True if repository is valid false otherwise + :rtype: bool + """ +diff --git a/tests/nosetests/pyanaconda_tests/module_source_base_test.py b/tests/nosetests/pyanaconda_tests/module_source_base_test.py +index 3ba40edf4c..c9f00fa4f5 100644 +--- a/tests/nosetests/pyanaconda_tests/module_source_base_test.py ++++ b/tests/nosetests/pyanaconda_tests/module_source_base_test.py +@@ -26,7 +26,7 @@ + from pyanaconda.modules.payloads.source.mount_tasks import SetUpMountTask, TearDownMountTask + from pyanaconda.modules.payloads.source.source_base import MountingSourceMixin + from pyanaconda.modules.payloads.source.utils import find_and_mount_iso_image, \ +- verify_valid_installtree ++ verify_valid_repository + + mount_location = "/some/dir" + +@@ -188,20 +188,20 @@ def find_and_mount_iso_image_fail_mount_test(self, + + self.assertEqual(iso_name, "") + +- def verify_valid_installtree_success_test(self): +- """Test verify_valid_installtree functionality success.""" ++ def verify_valid_repository_success_test(self): ++ """Test verify_valid_repository functionality success.""" + with TemporaryDirectory() as tmp: + repodir_path = Path(tmp, "repodata") + repodir_path.mkdir() + repomd_path = Path(repodir_path, "repomd.xml") + repomd_path.write_text("This is a cool repomd file!") + +- self.assertTrue(verify_valid_installtree(tmp)) ++ self.assertTrue(verify_valid_repository(tmp)) + +- def verify_valid_installtree_failed_test(self): +- """Test verify_valid_installtree functionality failed.""" ++ def verify_valid_repository_failed_test(self): ++ """Test verify_valid_repository functionality failed.""" + with TemporaryDirectory() as tmp: + repodir_path = Path(tmp, "repodata") + repodir_path.mkdir() + +- self.assertFalse(verify_valid_installtree(tmp)) ++ self.assertFalse(verify_valid_repository(tmp)) +diff 
--git a/tests/nosetests/pyanaconda_tests/module_source_harddrive_test.py b/tests/nosetests/pyanaconda_tests/module_source_harddrive_test.py +index dff84b6d19..99be32fa1f 100644 +--- a/tests/nosetests/pyanaconda_tests/module_source_harddrive_test.py ++++ b/tests/nosetests/pyanaconda_tests/module_source_harddrive_test.py +@@ -211,10 +211,10 @@ def success_find_iso_test(self, + return_value=True) + @patch("pyanaconda.modules.payloads.source.harddrive.initialization.find_and_mount_iso_image", + return_value="") +- @patch("pyanaconda.modules.payloads.source.harddrive.initialization.verify_valid_installtree", ++ @patch("pyanaconda.modules.payloads.source.harddrive.initialization.verify_valid_repository", + return_value=True) + def success_find_dir_test(self, +- verify_valid_installtree_mock, ++ verify_valid_repository_mock, + find_and_mount_iso_image_mock, + find_and_mount_device_mock): + """Hard drive source setup dir found.""" +@@ -228,7 +228,7 @@ def success_find_dir_test(self, + find_and_mount_iso_image_mock.assert_called_once_with( + device_mount_location + path_on_device, iso_mount_location + ) +- verify_valid_installtree_mock.assert_called_once_with( ++ verify_valid_repository_mock.assert_called_once_with( + device_mount_location + path_on_device + ) + self.assertEqual(result, SetupHardDriveResult(device_mount_location + path_on_device, "")) +@@ -237,12 +237,12 @@ def success_find_dir_test(self, + return_value=True) + @patch("pyanaconda.modules.payloads.source.harddrive.initialization.find_and_mount_iso_image", + return_value="") +- @patch("pyanaconda.modules.payloads.source.harddrive.initialization.verify_valid_installtree", ++ @patch("pyanaconda.modules.payloads.source.harddrive.initialization.verify_valid_repository", + return_value=False) + @patch("pyanaconda.modules.payloads.source.harddrive.initialization.unmount") + def failure_to_find_anything_test(self, + unmount_mock, +- verify_valid_installtree_mock, ++ verify_valid_repository_mock, + 
find_and_mount_iso_image_mock, + find_and_mount_device_mock): + """Hard drive source setup failure to find anything.""" +@@ -257,7 +257,7 @@ def failure_to_find_anything_test(self, + find_and_mount_iso_image_mock.assert_called_once_with( + device_mount_location + path_on_device, iso_mount_location + ) +- verify_valid_installtree_mock.assert_called_once_with( ++ verify_valid_repository_mock.assert_called_once_with( + device_mount_location + path_on_device + ) + unmount_mock.assert_called_once_with( +diff --git a/tests/nosetests/pyanaconda_tests/module_source_nfs_test.py b/tests/nosetests/pyanaconda_tests/module_source_nfs_test.py +index eb331dec10..d33796a469 100644 +--- a/tests/nosetests/pyanaconda_tests/module_source_nfs_test.py ++++ b/tests/nosetests/pyanaconda_tests/module_source_nfs_test.py +@@ -180,7 +180,7 @@ def success_find_iso_test(self, + + self.assertEqual(result, iso_mount_location) + +- @patch("pyanaconda.modules.payloads.source.nfs.initialization.verify_valid_installtree", ++ @patch("pyanaconda.modules.payloads.source.nfs.initialization.verify_valid_repository", + return_value=True) + @patch("pyanaconda.modules.payloads.source.nfs.initialization.find_and_mount_iso_image", + return_value="") +@@ -188,7 +188,7 @@ def success_find_iso_test(self, + def success_find_dir_test(self, + mount_mock, + find_and_mount_iso_image_mock, +- verify_valid_installtree_mock): ++ verify_valid_repository_mock): + """Test NFS source setup find installation tree success""" + task = _create_setup_task() + result = task.run() +@@ -201,7 +201,7 @@ def success_find_dir_test(self, + find_and_mount_iso_image_mock.assert_called_once_with(device_mount_location, + iso_mount_location) + +- verify_valid_installtree_mock.assert_called_once_with(device_mount_location) ++ verify_valid_repository_mock.assert_called_once_with(device_mount_location) + + self.assertEqual(result, device_mount_location) + +@@ -252,7 +252,7 @@ def setup_install_source_task_mount_failure_test(self, mount_mock): + 
options="nolock") + + @patch("pyanaconda.modules.payloads.source.nfs.initialization.unmount") +- @patch("pyanaconda.modules.payloads.source.nfs.initialization.verify_valid_installtree", ++ @patch("pyanaconda.modules.payloads.source.nfs.initialization.verify_valid_repository", + return_value=False) + @patch("pyanaconda.modules.payloads.source.nfs.initialization.find_and_mount_iso_image", + return_value="") +@@ -260,7 +260,7 @@ def setup_install_source_task_mount_failure_test(self, mount_mock): + def setup_install_source_task_find_anything_failure_test(self, + mount_mock, + find_and_mount_iso_image_mock, +- verify_valid_installtree_mock, ++ verify_valid_repository_mock, + unmount_mock): + """Test NFS can't find anything to install from""" + task = SetUpNFSSourceTask(device_mount_location, iso_mount_location, nfs_url) +@@ -274,7 +274,7 @@ def setup_install_source_task_find_anything_failure_test(self, + find_and_mount_iso_image_mock.assert_called_once_with(device_mount_location, + iso_mount_location) + +- verify_valid_installtree_mock.assert_called_once_with(device_mount_location) ++ verify_valid_repository_mock.assert_called_once_with(device_mount_location) + + unmount_mock.assert_called_once_with( + device_mount_location diff --git a/bugfix-set-up-LD_PRELOAD-for-the-Storage-and-Services-module.patch b/bugfix-set-up-LD_PRELOAD-for-the-Storage-and-Services-module.patch new file mode 100644 index 0000000..674500a --- /dev/null +++ b/bugfix-set-up-LD_PRELOAD-for-the-Storage-and-Services-module.patch @@ -0,0 +1,65 @@ +From 769f395e80c92972900ef348d7dd747014666f70 Mon Sep 17 00:00:00 2001 +From: yu_boyun <1215979730@qq.com> +Date: Mon, 11 Jan 2021 17:01:58 +0800 +Subject: [PATCH] set up LD_PRELOAD for the Storage and Services module + +--- + data/dbus/org.fedoraproject.Anaconda.Modules.Services.service | 2 +- + data/dbus/org.fedoraproject.Anaconda.Modules.Storage.service | 2 +- + pyanaconda/modules/services/__main__.py | 4 ++++ + pyanaconda/modules/storage/__main__.py | 4 
++++ + 4 files changed, 10 insertions(+), 2 deletions(-) + +diff --git a/data/dbus/org.fedoraproject.Anaconda.Modules.Services.service b/data/dbus/org.fedoraproject.Anaconda.Modules.Services.service +index 79c6949..c3a6098 100644 +--- a/data/dbus/org.fedoraproject.Anaconda.Modules.Services.service ++++ b/data/dbus/org.fedoraproject.Anaconda.Modules.Services.service +@@ -1,4 +1,4 @@ + [D-BUS Service] + Name=org.fedoraproject.Anaconda.Modules.Services +-Exec=/usr/libexec/anaconda/start-module pyanaconda.modules.services ++Exec=/usr/libexec/anaconda/start-module --env LD_PRELOAD=libgomp.so.1 pyanaconda.modules.services + User=root +diff --git a/data/dbus/org.fedoraproject.Anaconda.Modules.Storage.service b/data/dbus/org.fedoraproject.Anaconda.Modules.Storage.service +index 018ecf1..780200e 100644 +--- a/data/dbus/org.fedoraproject.Anaconda.Modules.Storage.service ++++ b/data/dbus/org.fedoraproject.Anaconda.Modules.Storage.service +@@ -1,4 +1,4 @@ + [D-BUS Service] + Name=org.fedoraproject.Anaconda.Modules.Storage +-Exec=/usr/libexec/anaconda/start-module pyanaconda.modules.storage ++Exec=/usr/libexec/anaconda/start-module --env LD_PRELOAD=libgomp.so.1 pyanaconda.modules.storage + User=root +diff --git a/pyanaconda/modules/services/__main__.py b/pyanaconda/modules/services/__main__.py +index d4b0879..4327dc9 100644 +--- a/pyanaconda/modules/services/__main__.py ++++ b/pyanaconda/modules/services/__main__.py +@@ -20,6 +20,10 @@ + from pyanaconda.modules.common import init + init() + ++import os ++if "LD_PRELOAD" in os.environ: ++ del os.environ["LD_PRELOAD"] ++ + from pyanaconda.modules.services.services import ServicesService + service = ServicesService() + service.run() +diff --git a/pyanaconda/modules/storage/__main__.py b/pyanaconda/modules/storage/__main__.py +index 327a129..29212a9 100644 +--- a/pyanaconda/modules/storage/__main__.py ++++ b/pyanaconda/modules/storage/__main__.py +@@ -20,6 +20,10 @@ + from pyanaconda.modules.common import init + 
init("/tmp/storage.log") + ++import os ++if "LD_PRELOAD" in os.environ: ++ del os.environ["LD_PRELOAD"] ++ + # Initialize Blivet. + from pyanaconda.modules.storage.initialization import enable_installer_mode + enable_installer_mode() +-- +1.8.3.1 + diff --git a/change-inst-repo-default-value.patch b/change-inst-repo-default-value.patch new file mode 100644 index 0000000..469f9e6 --- /dev/null +++ b/change-inst-repo-default-value.patch @@ -0,0 +1,26 @@ +From e73464ef17f54743dc194ad28e32797a10e844a4 Mon Sep 17 00:00:00 2001 +From: ctyuncommiter05 +Date: Thu, 24 Jun 2021 16:30:45 +0800 +Subject: [PATCH] change inst.repo default value + +Solve the problem of U disk installation failure problem. +--- + anaconda.py | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/anaconda.py b/anaconda.py +index 1abdeb2..44b573c 100755 +--- a/anaconda.py ++++ b/anaconda.py +@@ -263,6 +263,8 @@ if __name__ == "__main__": + from pyanaconda.flags import flags + from pyanaconda.core.kernel import kernel_arguments + (opts, depr) = parse_arguments(boot_cmdline=kernel_arguments) ++ if not opts.method: ++ opts.method = opts.stage2 + + from pyanaconda.core.configuration.anaconda import conf + conf.set_from_opts(opts) +-- +2.27.0 + diff --git a/delete-datezone-map.patch b/delete-datezone-map.patch new file mode 100644 index 0000000..2286eb6 --- /dev/null +++ b/delete-datezone-map.patch @@ -0,0 +1,132 @@ +diff -Nur anaconda-33.19.org/pyanaconda/ui/gui/spokes/datetime_spoke.glade anaconda-33.19/pyanaconda/ui/gui/spokes/datetime_spoke.glade +--- anaconda-33.19.org/pyanaconda/ui/gui/spokes/datetime_spoke.glade 2021-08-07 16:25:21.547621965 +0800 ++++ anaconda-33.19/pyanaconda/ui/gui/spokes/datetime_spoke.glade 2021-08-07 16:26:20.137758256 +0800 +@@ -3,7 +3,6 @@ + + + +- + + True + False +@@ -548,19 +547,6 @@ + + + +- +- True +- False +- +- +- +- True +- True +- 6 +- 1 +- +- +- + + True + False +diff -Nur anaconda-33.19.org/pyanaconda/ui/gui/spokes/datetime_spoke.py 
anaconda-33.19/pyanaconda/ui/gui/spokes/datetime_spoke.py +--- anaconda-33.19.org/pyanaconda/ui/gui/spokes/datetime_spoke.py 2021-08-07 16:25:21.547621965 +0800 ++++ anaconda-33.19/pyanaconda/ui/gui/spokes/datetime_spoke.py 2021-08-07 16:30:03.958279259 +0800 +@@ -53,9 +53,8 @@ + import gi + gi.require_version("Gdk", "3.0") + gi.require_version("Gtk", "3.0") +-gi.require_version("TimezoneMap", "1.0") + +-from gi.repository import Gdk, Gtk, TimezoneMap ++from gi.repository import Gdk, Gtk + + log = get_module_logger(__name__) + +@@ -377,11 +376,6 @@ + icon = "preferences-system-time-symbolic" + title = CN_("GUI|Spoke", "_Time & Date") + +- # Hack to get libtimezonemap loaded for GtkBuilder +- # see https://bugzilla.gnome.org/show_bug.cgi?id=712184 +- _hack = TimezoneMap.TimezoneMap() +- del(_hack) +- + def __init__(self, *args): + NormalSpoke.__init__(self, *args) + +@@ -407,7 +401,6 @@ + self._yearsStore = self.builder.get_object("years") + self._regionsStore = self.builder.get_object("regions") + self._citiesStore = self.builder.get_object("cities") +- self._tzmap = self.builder.get_object("tzmap") + self._dateBox = self.builder.get_object("dateBox") + + # we need to know it the new value is the same as previous or not +@@ -530,10 +523,6 @@ + else: + return _("Invalid timezone") + else: +- location = self._tzmap.get_location() +- if location and location.get_property("zone"): +- return _("%s timezone") % get_xlated_timezone(location.get_property("zone")) +- else: + return _("Nothing selected") + + def apply(self): +@@ -585,7 +574,6 @@ + kickstart_timezone = self._timezone_module.Timezone + + if is_valid_timezone(kickstart_timezone): +- self._tzmap.set_timezone(kickstart_timezone) + time.tzset() + + self._update_datetime() +@@ -960,7 +948,6 @@ + if region == "Etc": + # Etc timezones cannot be displayed on the map, so let's reset the + # location and manually set a highlight with no location pin. 
+- self._tzmap.clear_location() + if city in ("GMT", "UTC"): + offset = 0.0 + # The tzdb data uses POSIX-style signs for the GMT zones, which is +@@ -971,13 +958,14 @@ + # Take the part after "GMT" + offset = -float(city[3:]) + +- self._tzmap.set_selected_offset(offset) + time.tzset() + else: + # we don't want the timezone-changed signal to be emitted +- self._tzmap.set_timezone(timezone) + time.tzset() + ++ self._tz = get_timezone(timezone) ++ self._update_datetime() ++ + # update "old" values + self._old_city = city + +@@ -1027,22 +1015,6 @@ + self._stop_and_maybe_start_time_updating(interval=5) + self._daysFilter.refilter() + +- def on_location_changed(self, tz_map, location): +- if not location: +- return +- +- timezone = location.get_property('zone') +- +- # Updating the timezone will update the region/city combo boxes to match. +- # The on_city_changed handler will attempt to convert the timezone back +- # to a location and set it in the map, which we don't want, since we +- # already have a location. That's why we're here. 
+- with blockedHandler(self._cityCombo, self.on_city_changed): +- if self._set_timezone(timezone): +- # timezone successfully set +- self._tz = get_timezone(timezone) +- self._update_datetime() +- + def on_timeformat_changed(self, button24h, *args): + hours = int(self._hoursLabel.get_text()) + amPm = self._amPmLabel.get_text() diff --git a/disable-disk-encryption.patch b/disable-disk-encryption.patch new file mode 100644 index 0000000..bdddea0 --- /dev/null +++ b/disable-disk-encryption.patch @@ -0,0 +1,71 @@ +From bec6776715baaff79d29e1703b7c3306c265071b Mon Sep 17 00:00:00 2001 +From: xia_qirong +Date: Wed, 16 Sep 2020 15:28:39 +0800 +Subject: [PATCH] disable disk encryption + +--- + pyanaconda/ui/gui/spokes/custom_storage.py | 8 ++++---- + pyanaconda/ui/gui/spokes/storage.py | 7 ++++--- + 2 files changed, 8 insertions(+), 7 deletions(-) + +diff --git a/pyanaconda/ui/gui/spokes/custom_storage.py b/pyanaconda/ui/gui/spokes/custom_storage.py +index 347a0e0..d72e315 100644 +--- a/pyanaconda/ui/gui/spokes/custom_storage.py ++++ b/pyanaconda/ui/gui/spokes/custom_storage.py +@@ -796,8 +796,8 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + fancy_set_sensitive(self._reformatCheckbox, self._permissions.reformat) + + # Set up the encryption. 
+- self._encryptCheckbox.set_active(self._request.device_encrypted) +- fancy_set_sensitive(self._encryptCheckbox, self._permissions.device_encrypted) ++ self._encryptCheckbox.set_active(False) ++ fancy_set_sensitive(self._encryptCheckbox, False) + + self._encryptCheckbox.set_inconsistent(self._request.container_encrypted) + text = _("The container is encrypted.") if self._request.container_encrypted else "" +@@ -1268,7 +1268,7 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + self._encryptCheckbox.set_active(False) + self._encryptCheckbox.set_inconsistent(True) + +- fancy_set_sensitive(self._encryptCheckbox, self._permissions.device_encrypted) ++ fancy_set_sensitive(self._encryptCheckbox, False) + self._update_luks_combo() + + # Update the UI. +@@ -1490,7 +1490,7 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + + # Update the UI. + fancy_set_sensitive(self._labelEntry, self._permissions.label) +- fancy_set_sensitive(self._encryptCheckbox, self._permissions.device_encrypted) ++ fancy_set_sensitive(self._encryptCheckbox, False) + self._update_luks_combo() + fancy_set_sensitive(self._fsCombo, self._permissions.format_type) + self.on_value_changed() +diff --git a/pyanaconda/ui/gui/spokes/storage.py b/pyanaconda/ui/gui/spokes/storage.py +index 9494d6a..b2c0d3e 100644 +--- a/pyanaconda/ui/gui/spokes/storage.py ++++ b/pyanaconda/ui/gui/spokes/storage.py +@@ -289,6 +289,9 @@ class StorageSpoke(NormalSpoke, StorageCheckHandler): + # Configure the partitioning methods. + self._configure_partitioning_methods() + ++ # disable disk encryption ++ self._encryption_revealer.set_reveal_child(False) ++ + def _configure_partitioning_methods(self): + if "CustomPartitioningSpoke" in conf.ui.hidden_spokes: + self._custom_part_radio_button.set_visible(False) +@@ -325,9 +328,7 @@ class StorageSpoke(NormalSpoke, StorageCheckHandler): + # as Blivet GUI handles encryption per encrypted device, not globally. 
+ # Hide it also for the interactive partitioning as CustomPartitioningSpoke + # provides support for encryption of mount points. +- self._encryption_revealer.set_reveal_child( +- current_partitioning_method == PARTITIONING_METHOD_AUTOMATIC +- ) ++ self._encryption_revealer.set_reveal_child(False) + + # Hide the reclaim space checkbox if automatic storage configuration is not used. + self._reclaim_revealer.set_reveal_child( +-- +1.8.3.1 + diff --git a/disable-product-name-in-welcome-is-uppercase.patch b/disable-product-name-in-welcome-is-uppercase.patch new file mode 100644 index 0000000..f61e2e6 --- /dev/null +++ b/disable-product-name-in-welcome-is-uppercase.patch @@ -0,0 +1,25 @@ +From e1d294331217ec7380f7f186d7a6837e72770432 Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Fri, 19 Jun 2020 11:42:23 +0800 +Subject: [PATCH] disable-product-name-in-welcome-is-uppercase + +--- + pyanaconda/ui/gui/spokes/welcome.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/pyanaconda/ui/gui/spokes/welcome.py b/pyanaconda/ui/gui/spokes/welcome.py +index 3373f1d..639fdcd 100644 +--- a/pyanaconda/ui/gui/spokes/welcome.py ++++ b/pyanaconda/ui/gui/spokes/welcome.py +@@ -251,7 +251,7 @@ class WelcomeLanguageSpoke(StandaloneSpoke, LangLocaleHandler): + languageEntry.set_placeholder_text(_(self._origStrings[languageEntry])) + + # And of course, don't forget the underlying window. 
+- self.window.set_property("distribution", distributionText().upper()) ++ self.window.set_property("distribution", distributionText()) + self.window.retranslate() + + # Retranslate the window title text +-- +2.23.0 + diff --git a/disable-set-passwd-without-confirmation.patch b/disable-set-passwd-without-confirmation.patch new file mode 100644 index 0000000..2966c66 --- /dev/null +++ b/disable-set-passwd-without-confirmation.patch @@ -0,0 +1,27 @@ +From a51168ce9ab849f857efd96eae66cff5247a45a4 Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Thu, 18 Jun 2020 17:35:01 +0800 +Subject: [PATCH] disable set passwd without confirmation + +--- + pyanaconda/input_checking.py | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/pyanaconda/input_checking.py b/pyanaconda/input_checking.py +index 9efbd7b..b0fd605 100644 +--- a/pyanaconda/input_checking.py ++++ b/pyanaconda/input_checking.py +@@ -412,6 +412,10 @@ class PasswordValidityCheck(InputCheck): + pw_score = 4 + status_text = _(constants.SecretStatus.STRONG.value) + ++ #disable set password without confirnation ++ if not error_message and not check_request.password_confirmation: ++ error_message = _(constants.SECRET_CONFIRM_ERROR_GUI[check_request.secret_type]) ++ + # the policy influences the overall success of the check + # - score 0 & strict == True -> success = False + # - score 0 & strict == False -> success = True +-- +2.23.0 + diff --git a/disable-ssh-login-checkbox.patch b/disable-ssh-login-checkbox.patch new file mode 100644 index 0000000..3a7403d --- /dev/null +++ b/disable-ssh-login-checkbox.patch @@ -0,0 +1,38 @@ +From 6920ff7aa9c0215a032e00a5406b943737903c72 Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Wed, 1 Jul 2020 18:08:35 +0800 +Subject: [PATCH] disable ssh login checkbox + +--- + pyanaconda/ui/gui/spokes/root_password.py | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +diff --git a/pyanaconda/ui/gui/spokes/root_password.py b/pyanaconda/ui/gui/spokes/root_password.py +index 
313ba0f..2af9111 100644 +--- a/pyanaconda/ui/gui/spokes/root_password.py ++++ b/pyanaconda/ui/gui/spokes/root_password.py +@@ -72,6 +72,8 @@ class PasswordSpoke(FirstbootSpokeMixIn, NormalSpoke, GUISpokeInputCheckHandler) + self._password_label = self.builder.get_object("password_label") + self._lock = self.builder.get_object("lock") + self._root_password_ssh_login_override = self.builder.get_object("root_password_ssh_login_override") ++ self._root_password_ssh_login_override.set_visible(False) ++ self._root_password_ssh_login_override.set_no_show_all(True) + + # Install the password checks: + # - Has a password been specified? +@@ -147,9 +149,9 @@ class PasswordSpoke(FirstbootSpokeMixIn, NormalSpoke, GUISpokeInputCheckHandler) + # we use the _refresh_running atribute to differentiate + # it from "real" clicks + self._lock.set_active(self._users_module.IsRootAccountLocked) +- self._root_password_ssh_login_override.set_active( +- self._users_module.RootPasswordSSHLoginAllowed +- ) ++ self._root_password_ssh_login_override.set_visible(False) ++ self._root_password_ssh_login_override.set_no_show_all(True) ++ + if not self._lock.get_active(): + # rerun checks so that we have a correct status message, if any + self.checker.run_checks() +-- +2.23.0 + diff --git a/fix-0-storage-devices-selected.patch b/fix-0-storage-devices-selected.patch new file mode 100644 index 0000000..081aa85 --- /dev/null +++ b/fix-0-storage-devices-selected.patch @@ -0,0 +1,40 @@ +From aa819ebee288aa307dc204337228c402189fd5e5 Mon Sep 17 00:00:00 2001 +From: "Qr.Xia" <69908158+xqrustc2020@users.noreply.github.com> +Date: Mon, 12 Oct 2020 11:04:27 +0800 +Subject: [PATCH] fix 0 storage devices selected + +"0 storage devices selected" is printed because the format_type of sda +is "ext4" rather than "disklabel", and disk 'sda' is filtered by +filter_disks_by_names(partitioned_devices, selected_disks). 
+Resolves: rhbz#1878661 +--- + pyanaconda/ui/gui/spokes/custom_storage.py | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +diff --git a/pyanaconda/ui/gui/spokes/custom_storage.py b/pyanaconda/ui/gui/spokes/custom_storage.py +index d72e315..b89866c 100644 +--- a/pyanaconda/ui/gui/spokes/custom_storage.py ++++ b/pyanaconda/ui/gui/spokes/custom_storage.py +@@ -47,7 +47,7 @@ from pyanaconda.modules.common.structures.partitioning import PartitioningReques + from pyanaconda.modules.common.structures.device_factory import DeviceFactoryRequest, \ + DeviceFactoryPermissions + from pyanaconda.product import productName, productVersion +-from pyanaconda.ui.lib.storage import reset_bootloader, create_partitioning, filter_disks_by_names ++from pyanaconda.ui.lib.storage import reset_bootloader, create_partitioning + from pyanaconda.core.storage import DEVICE_TYPE_UNSUPPORTED, DEVICE_TEXT_MAP, \ + MOUNTPOINT_DESCRIPTIONS, NAMED_DEVICE_TYPES, CONTAINER_DEVICE_TYPES, device_type_from_autopart, \ + PROTECTED_FORMAT_TYPES, DEVICE_TYPE_BTRFS, DEVICE_TYPE_MD, Size +@@ -303,9 +303,7 @@ class CustomPartitioningSpoke(NormalSpoke, StorageCheckHandler): + self._default_file_system = self._device_tree.GetDefaultFileSystem() + + # Initialize the selected disks. +- selected_disks = self._disk_selection.SelectedDisks +- partitioned_devices = self._device_tree.GetPartitioned() +- self._selected_disks = filter_disks_by_names(partitioned_devices, selected_disks) ++ self._selected_disks = self._disk_selection.SelectedDisks + + # Update the UI elements. 
+ self._do_refresh(init_expanded_pages=True) +-- +2.23.0 + diff --git a/fix-hostname-info.patch b/fix-hostname-info.patch new file mode 100644 index 0000000..3f0496c --- /dev/null +++ b/fix-hostname-info.patch @@ -0,0 +1,47 @@ +From 8ecae3e85d9eeedb131dbc026dcdf5bba95cdb15 Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Thu, 18 Jun 2020 17:13:47 +0800 +Subject: [PATCH] fix hostname info + +--- + po/zh_CN.po | 6 +++--- + pyanaconda/network.py | 4 ++-- + 2 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/po/zh_CN.po b/po/zh_CN.po +index 0a4d1cd..be8cefe 100644 +--- a/po/zh_CN.po ++++ b/po/zh_CN.po +@@ -2365,11 +2365,11 @@ msgstr "本地主机名绝不能以句号\".\"结尾。" + #: pyanaconda/network.py:119 + msgid "" + "Host names can only contain the characters 'a-z', 'A-Z', '0-9', '-', or '.', " +-"parts between periods must contain something and cannot start or end with " +-"'-'." ++"parts between periods must contain something being 63 or fewer " ++"characters and cannot start or end with '-'." + msgstr "" + "主机名只能包含 'a-z', 'A-Z', '0-9', '-'(英文减号), 或者 '.'(英文点号),其" +-"中两个点号中不能为空且不能以'-'开头或结尾。" ++"中两个点号中不能为空,必须少于64个字符且不能以'-'开头或结尾。" + + #: pyanaconda/network.py:465 + msgid "Status not available" +diff --git a/pyanaconda/network.py b/pyanaconda/network.py +index f8e9b19..127a1bd 100644 +--- a/pyanaconda/network.py ++++ b/pyanaconda/network.py +@@ -118,8 +118,8 @@ def is_valid_hostname(hostname, local=False): + if not re.match('^' + HOSTNAME_PATTERN_WITHOUT_ANCHORS + '$', hostname): + return (False, _("Host names can only contain the characters 'a-z', " + "'A-Z', '0-9', '-', or '.', parts between periods " +- "must contain something and cannot start or end with " +- "'-'.")) ++ "must contain something being 63 or fewer " ++ "characters and cannot start or end with '-'.")) + + return (True, "") + +-- +2.23.0 + diff --git a/fix-remove-unknow-partition-is-sda-failed.patch b/fix-remove-unknow-partition-is-sda-failed.patch new file mode 100644 index 0000000..b38a5a1 --- /dev/null +++ 
b/fix-remove-unknow-partition-is-sda-failed.patch @@ -0,0 +1,46 @@ +From ad48d3ab850c9dd40908a51eae3580fcc148e171 Mon Sep 17 00:00:00 2001 +From: xqrustc2020 <69908158+xqrustc2020@users.noreply.github.com> +Date: Tue, 29 Sep 2020 13:16:43 +0800 +Subject: [PATCH] fix remove unkown partition in sda failed + +fix: cannot create partition when sda exists a ext4 filesystem. +When you clicked the "-" button, only the format on sda should be +destroyed without removing sda from the device tree, but sda +was also destroyed. As a result, sda cannot be found during disk +initialization and an error was reported. +Resolves: rhbz#1878661 +--- + .../modules/storage/partitioning/interactive/utils.py | 10 ++++------ + 1 file changed, 4 insertions(+), 6 deletions(-) + +diff --git a/pyanaconda/modules/storage/partitioning/interactive/utils.py b/pyanaconda/modules/storage/partitioning/interactive/utils.py +index b52876a..fe7bd59 100644 +--- a/pyanaconda/modules/storage/partitioning/interactive/utils.py ++++ b/pyanaconda/modules/storage/partitioning/interactive/utils.py +@@ -1046,8 +1046,10 @@ def _destroy_device(storage, device): + :param device: an instance of a device + """ + # Remove the device. +- if device.is_disk and device.partitioned and not device.format.supported: +- storage.recursive_remove(device) ++ if device.is_disk: ++ if device.partitioned and not device.format.supported: ++ storage.recursive_remove(device) ++ storage.initialize_disk(device) + elif device.direct and not device.isleaf: + # We shouldn't call this method for with non-leaf devices + # except for those which are also directly accessible like +@@ -1057,10 +1059,6 @@ def _destroy_device(storage, device): + else: + storage.destroy_device(device) + +- # Initialize the disk. +- if device.is_disk: +- storage.initialize_disk(device) +- + # Remove empty extended partitions. 
+ if getattr(device, "is_logical", False): + storage.remove_empty_extended_partitions() +-- +2.23.0 + diff --git a/fix-xorg-timeout-and-throw-exception.patch b/fix-xorg-timeout-and-throw-exception.patch new file mode 100644 index 0000000..43b31ba --- /dev/null +++ b/fix-xorg-timeout-and-throw-exception.patch @@ -0,0 +1,207 @@ +From 0b851a3f25d6e2ac7e6d06e342d0823c206ee25a Mon Sep 17 00:00:00 2001 +From: Vladimir Slavik +Date: Wed, 21 Apr 2021 20:00:54 +0200 +Subject: [PATCH] Another attempt at the X thing. This gives up the exception + handler test temporarily, and solves almost everything. + +The main problem of other solutions is that once X starts, +it steals the screen by going to tty6. If the exception handler +test-invoking handler is not returned back immediately, +an "after-timeout" handler can be installed instead, which switches back to tty1. + +With that in place, it's also safe to terminate Xorg once +it's clear it's not coming in time. The termination will happen later, +but that does not matter any more. + +Finally, with the termination happening, +it is also safe to return the crash report text handler. + +Resolves: rhbz#1918702 + +Previous work: #3107, #3132, #3141, #3295. Thanks to @bitcoffeeiux and @poncovka. + +The one avenue left unexplored is using the -displayfd option. 
+--- + pyanaconda/core/util.py | 62 ++++++++++++++++++++++++++++++++--------- + pyanaconda/display.py | 29 +++++++++++++++++-- + 2 files changed, 75 insertions(+), 16 deletions(-) + +diff --git a/pyanaconda/core/util.py b/pyanaconda/core/util.py +index b7a1731..3013cd8 100644 +--- a/pyanaconda/core/util.py ++++ b/pyanaconda/core/util.py +@@ -47,7 +47,7 @@ from pyanaconda.core.constants import DRACUT_SHUTDOWN_EJECT, TRANSLATIONS_UPDATE + IPMI_ABORTED, X_TIMEOUT, TAINT_HARDWARE_UNSUPPORTED, TAINT_SUPPORT_REMOVED, \ + WARNING_HARDWARE_UNSUPPORTED, WARNING_SUPPORT_REMOVED + from pyanaconda.core.constants import SCREENSHOTS_DIRECTORY, SCREENSHOTS_TARGET_DIRECTORY +-from pyanaconda.errors import RemovedModuleError, ExitError ++from pyanaconda.errors import RemovedModuleError + + from pyanaconda.anaconda_logging import program_log_lock + from pyanaconda.anaconda_loggers import get_module_logger, get_program_logger +@@ -204,6 +204,19 @@ def startProgram(argv, root='/', stdin=None, stdout=subprocess.PIPE, stderr=subp + preexec_fn=preexec, cwd=root, env=env, **kwargs) + + ++class X11Status: ++ """Status of Xorg launch. ++ ++ Values of an instance can be modified from the handler functions. ++ """ ++ def __init__(self): ++ self.started = False ++ self.timed_out = False ++ ++ def needs_waiting(self): ++ return not (self.started or self.timed_out) ++ ++ + def startX(argv, output_redirect=None, timeout=X_TIMEOUT): + """ Start X and return once X is ready to accept connections. + +@@ -217,28 +230,36 @@ def startX(argv, output_redirect=None, timeout=X_TIMEOUT): + :param output_redirect: file or file descriptor to redirect stdout and stderr to + :param timeout: Number of seconds to timing out. 
+ """ +- # Use a list so the value can be modified from the handler function +- x11_started = [False] ++ x11_status = X11Status() + +- def sigusr1_handler(num, frame): ++ # Handle successful start before timeout ++ def sigusr1_success_handler(num, frame): + log.debug("X server has signalled a successful start.") +- x11_started[0] = True ++ x11_status.started = True + + # Fail after, let's say a minute, in case something weird happens + # and we don't receive SIGUSR1 + def sigalrm_handler(num, frame): + # Check that it didn't make it under the wire +- if x11_started[0]: ++ if x11_status.started: + return ++ x11_status.timed_out = True + log.error("Timeout trying to start %s", argv[0]) +- raise ExitError("Timeout trying to start %s" % argv[0]) + +- # preexec_fn to add the SIGUSR1 handler in the child ++ # Handle delayed start after timeout ++ def sigusr1_too_late_handler(num, frame): ++ if x11_status.timed_out: ++ log.debug("SIGUSR1 received after X server timeout. Switching back to tty1. 
" ++ "SIGUSR1 now again initiates test of exception reporting.") ++ signal.signal(signal.SIGUSR1, old_sigusr1_handler) ++ ++ # preexec_fn to add the SIGUSR1 handler in the child we are starting ++ # see man page XServer(1), section "signals" + def sigusr1_preexec(): + signal.signal(signal.SIGUSR1, signal.SIG_IGN) + + try: +- old_sigusr1_handler = signal.signal(signal.SIGUSR1, sigusr1_handler) ++ old_sigusr1_handler = signal.signal(signal.SIGUSR1, sigusr1_success_handler) + old_sigalrm_handler = signal.signal(signal.SIGALRM, sigalrm_handler) + + # Start the timer +@@ -249,16 +270,31 @@ def startX(argv, output_redirect=None, timeout=X_TIMEOUT): + preexec_fn=sigusr1_preexec) + WatchProcesses.watch_process(childproc, argv[0]) + +- # Wait for SIGUSR1 +- while not x11_started[0]: ++ # Wait for SIGUSR1 or SIGALRM ++ while x11_status.needs_waiting(): + signal.pause() + + finally: +- # Put everything back where it was ++ # Stop the timer + signal.alarm(0) +- signal.signal(signal.SIGUSR1, old_sigusr1_handler) + signal.signal(signal.SIGALRM, old_sigalrm_handler) + ++ # Handle outcome of X start attempt ++ if x11_status.started: ++ signal.signal(signal.SIGUSR1, old_sigusr1_handler) ++ elif x11_status.timed_out: ++ signal.signal(signal.SIGUSR1, sigusr1_too_late_handler) ++ # Kill Xorg because from now on we will not use it. It will exit only after sending ++ # the signal, but at least we don't have to track that. ++ WatchProcesses.unwatch_process(childproc) ++ childproc.terminate() ++ log.debug("Exception handler test suspended to prevent accidental activation by " ++ "delayed Xorg start. Next SIGUSR1 will be handled as delayed Xorg start.") ++ # Raise an exception to notify the caller that things went wrong. This affects ++ # particularly pyanaconda.display.do_startup_x11_actions(), where the window manager ++ # is started immediately after this. The WM would just wait forever. 
++ raise TimeoutError("Timeout trying to start %s" % argv[0]) ++ + + def _run_program(argv, root='/', stdin=None, stdout=None, env_prune=None, log_output=True, + binary_output=False, filter_stderr=False): +diff --git a/pyanaconda/display.py b/pyanaconda/display.py +index 8379d9c..b577eb8 100644 +--- a/pyanaconda/display.py ++++ b/pyanaconda/display.py +@@ -22,6 +22,7 @@ + import os + import subprocess + import time ++import textwrap + import pkgutil + + from pyanaconda.core.configuration.anaconda import conf +@@ -49,6 +50,14 @@ from pyanaconda.anaconda_loggers import get_module_logger, get_stdout_logger + log = get_module_logger(__name__) + stdout_log = get_stdout_logger() + ++X_TIMEOUT_ADVICE = \ ++ "Do not load the stage2 image over a slow network link.\n" \ ++ "Wait longer for the X server startup with the inst.xtimeout= boot option." \ ++ "The default is 60 seconds.\n" \ ++ "Load the stage2 image into memory with the rd.live.ram boot option to decrease access " \ ++ "time.\n" \ ++ "Enforce text mode when installing from remote media with the inst.text boot option." ++# on RHEL also: "Use the customer portal download URL in ilo/drac devices for greater speed." + + # Spice + +@@ -78,7 +87,7 @@ def ask_vnc_question(anaconda, vnc_server, message): + App.initialize() + loop = App.get_event_loop() + loop.set_quit_callback(tui_quit_callback) +- spoke = AskVNCSpoke(anaconda.ksdata, message) ++ spoke = AskVNCSpoke(anaconda.ksdata, message=message) + ScreenHandler.schedule_screen(spoke) + App.run() + +@@ -314,9 +323,23 @@ def setup_display(anaconda, options): + try: + start_x11(xtimeout) + do_startup_x11_actions() +- except (OSError, RuntimeError) as e: ++ except TimeoutError as e: + log.warning("X startup failed: %s", e) +- stdout_log.warning("X startup failed, falling back to text mode") ++ print("\nX did not start in the expected time, falling back to text mode. 
There are " ++ "multiple ways to avoid this issue:") ++ wrapper = textwrap.TextWrapper(initial_indent=" * ", subsequent_indent=" ", ++ width=os.get_terminal_size().columns - 3) ++ for line in X_TIMEOUT_ADVICE.split("\n"): ++ print(wrapper.fill(line)) ++ util.vtActivate(1) ++ anaconda.display_mode = constants.DisplayModes.TUI ++ anaconda.gui_startup_failed = True ++ time.sleep(2) ++ ++ except (OSError, RuntimeError) as e: ++ log.warning("X or window manager startup failed: %s", e) ++ print("\nX or window manager startup failed, falling back to text mode.") ++ util.vtActivate(1) + anaconda.display_mode = constants.DisplayModes.TUI + anaconda.gui_startup_failed = True + time.sleep(2) +-- +2.23.0 + diff --git a/hide-help-button.patch b/hide-help-button.patch new file mode 100644 index 0000000..9a089a0 --- /dev/null +++ b/hide-help-button.patch @@ -0,0 +1,131 @@ +From cf192d77045b8aeb8cdcd55c98a93ad64fea3c3b Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Fri, 19 Jun 2020 09:20:14 +0800 +Subject: [PATCH] hide help button + +--- + data/tmux.conf | 3 +-- + pyanaconda/ui/gui/__init__.py | 27 --------------------------- + widgets/src/BaseWindow.c | 21 --------------------- + 3 files changed, 1 insertion(+), 50 deletions(-) + +diff --git a/data/tmux.conf b/data/tmux.conf +index 87c9cb7..63240f7 100644 +--- a/data/tmux.conf ++++ b/data/tmux.conf +@@ -1,7 +1,6 @@ + # tmux.conf for the anaconda environment + + bind -n M-tab next +-bind -n F1 list-keys + + set-option -s exit-unattached off + set-option -g base-index 1 +@@ -25,7 +24,7 @@ set-option -g history-limit 10000 + # rhbz#1722181 + new-session -d -s anaconda -n main "LD_PRELOAD=libgomp.so.1 anaconda" + +-set-option status-right '#[fg=blue]#(echo -n "Switch tab: Alt+Tab | Help: F1 ")' ++set-option status-right '#[fg=blue]#(echo -n "Switch tab: Alt+Tab ")' + + new-window -d -n shell "bash --login" + new-window -d -n log "tail -F /tmp/anaconda.log" +diff --git a/pyanaconda/ui/gui/__init__.py b/pyanaconda/ui/gui/__init__.py 
+index 06373d9..6a6e3b9 100644 +--- a/pyanaconda/ui/gui/__init__.py ++++ b/pyanaconda/ui/gui/__init__.py +@@ -443,20 +443,6 @@ class MainWindow(Gtk.Window): + # Return False to indicate that the child allocation is not yet set + return False + +- def _on_mnemonics_visible_changed(self, window, property_type, obj): +- # mnemonics display has been activated or deactivated, +- # add or remove the F1 mnemonics display from the help button +- help_button = obj.window.get_help_button() +- if window.props.mnemonics_visible: +- # save current label +- old_label = help_button.get_label() +- self._saved_help_button_label = old_label +- # add the (F1) "mnemonics" to the help button +- help_button.set_label("%s (F1)" % old_label) +- else: +- # restore the old label +- help_button.set_label(self._saved_help_button_label) +- + def _on_child_added(self, widget, user_data): + # If this is GtkLabel, apply the language attribute + if isinstance(widget, Gtk.Label): +@@ -480,8 +466,6 @@ class MainWindow(Gtk.Window): + old_screen = self._stack.get_visible_child() + if old_screen: + old_screen.remove_accelerator(self._accel_group, Gdk.KEY_F12, 0) +- old_screen.remove_accelerator(self._accel_group, Gdk.KEY_F1, 0) +- old_screen.remove_accelerator(self._accel_group, Gdk.KEY_F1, Gdk.ModifierType.MOD1_MASK) + + # Check if the widget is already on the stack + if child not in self._stack_contents: +@@ -498,17 +482,6 @@ class MainWindow(Gtk.Window): + child.window.add_accelerator("button-clicked", self._accel_group, + Gdk.KEY_F12, 0, 0) + +- # Configure the help button +- child.window.add_accelerator("help-button-clicked", self._accel_group, +- Gdk.KEY_F1, 0, 0) +- child.window.add_accelerator("help-button-clicked", self._accel_group, +- Gdk.KEY_F1, Gdk.ModifierType.MOD1_MASK, 0) +- +- # Connect to mnemonics-visible to add the (F1) mnemonic to the button label +- if self._mnemonic_signal: +- self.disconnect(self._mnemonic_signal) +- self._mnemonic_signal = 
self.connect("notify::mnemonics-visible", self._on_mnemonics_visible_changed, child) +- + self._stack.set_visible_child(child.window) + + if child.focusWidgetName: +diff --git a/widgets/src/BaseWindow.c b/widgets/src/BaseWindow.c +index 6a1e372..203d4a7 100644 +--- a/widgets/src/BaseWindow.c ++++ b/widgets/src/BaseWindow.c +@@ -393,30 +393,11 @@ G_GNUC_END_IGNORE_DEPRECATIONS + gtk_widget_set_margin_top(win->priv->layout_indicator, 6); + gtk_widget_set_margin_bottom(win->priv->layout_indicator, 6); + +- /* Create the help button. */ +- win->priv->help_button = gtk_button_new_with_label(_(HELP_BUTTON_LABEL)); +- gtk_widget_set_halign(win->priv->help_button, GTK_ALIGN_END); +- gtk_widget_set_vexpand(win->priv->help_button, FALSE); +- gtk_widget_set_valign(win->priv->help_button, GTK_ALIGN_END); +- gtk_widget_set_margin_bottom(win->priv->help_button, 6); +- gtk_widget_set_name(win->priv->help_button, "anaconda-help-button"); +- +- atk = gtk_widget_get_accessible(win->priv->help_button); +- atk_object_set_name(atk, _(HELP_BUTTON_LABEL)); +- +- /* Hook up some signals for that button. The signal handlers here will +- * just raise our own custom signals for the whole window. +- */ +- g_signal_connect(win->priv->help_button, "clicked", +- G_CALLBACK(anaconda_base_window_help_button_clicked), win); +- +- + /* Add everything to the nav area. 
*/ + gtk_grid_attach(GTK_GRID(win->priv->nav_area), win->priv->name_label, 0, 0, 1, 1); + gtk_grid_attach(GTK_GRID(win->priv->nav_area), win->priv->distro_label, 1, 0, 2, 1); + gtk_grid_attach(GTK_GRID(win->priv->nav_area), win->priv->beta_label, 1, 1, 1, 1); + gtk_grid_attach(GTK_GRID(win->priv->nav_area), win->priv->layout_indicator, 1, 2, 1, 1); +- gtk_grid_attach(GTK_GRID(win->priv->nav_area), win->priv->help_button, 2, 1, 1, 2); + + /* Last thing for the main_box is a revealer for the info bar */ + win->priv->info_revealer = gtk_revealer_new(); +@@ -832,8 +813,6 @@ void anaconda_base_window_retranslate(AnacondaBaseWindow *win) { + + gtk_label_set_text(GTK_LABEL(win->priv->beta_label), _(win->priv->orig_beta)); + +- gtk_button_set_label(GTK_BUTTON(win->priv->help_button), _(HELP_BUTTON_LABEL)); +- + /* retranslate the layout indicator */ + anaconda_layout_indicator_retranslate(ANACONDA_LAYOUT_INDICATOR(win->priv->layout_indicator)); + } +-- +2.23.0 + diff --git a/make-name-not-force-to-uppercase.patch b/make-name-not-force-to-uppercase.patch new file mode 100644 index 0000000..b7eef41 --- /dev/null +++ b/make-name-not-force-to-uppercase.patch @@ -0,0 +1,62 @@ +From 853c4c8307c7427e5ee4fb57a2ffdcad1ce9d7f6 Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Thu, 18 Jun 2020 22:39:12 +0800 +Subject: [PATCH] make name not force to uppercase + +--- + pyanaconda/ui/gui/__init__.py | 4 ++-- + pyanaconda/ui/gui/hubs/__init__.py | 2 +- + pyanaconda/ui/gui/spokes/welcome.py | 2 +- + 3 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/pyanaconda/ui/gui/__init__.py b/pyanaconda/ui/gui/__init__.py +index 8c20423..06373d9 100644 +--- a/pyanaconda/ui/gui/__init__.py ++++ b/pyanaconda/ui/gui/__init__.py +@@ -839,7 +839,7 @@ class GraphicalUserInterface(UserInterface): + self._currentAction.refresh() + + self._currentAction.window.set_beta(not self._isFinal) +- self._currentAction.window.set_property("distribution", self._distributionText().upper()) ++ 
self._currentAction.window.set_property("distribution", self._distributionText()) + + # Set some program-wide settings. + settings = Gtk.Settings.get_default() +@@ -1005,7 +1005,7 @@ class GraphicalUserInterface(UserInterface): + + nextAction.initialize() + nextAction.window.set_beta(self._currentAction.window.get_beta()) +- nextAction.window.set_property("distribution", self._distributionText().upper()) ++ nextAction.window.set_property("distribution", self._distributionText()) + + if not nextAction.showable: + self._currentAction.window.hide() +diff --git a/pyanaconda/ui/gui/hubs/__init__.py b/pyanaconda/ui/gui/hubs/__init__.py +index 062e04e..a99e438 100644 +--- a/pyanaconda/ui/gui/hubs/__init__.py ++++ b/pyanaconda/ui/gui/hubs/__init__.py +@@ -145,7 +145,7 @@ class Hub(GUIObject, common.Hub): + # From here on, this Spoke will always exist. + spoke = spokeClass(self.data, self.storage, self.payload) + spoke.window.set_beta(self.window.get_beta()) +- spoke.window.set_property("distribution", distributionText().upper()) ++ spoke.window.set_property("distribution", distributionText()) + + # If a spoke is not showable, it is unreachable in the UI. We + # might as well get rid of it. 
+diff --git a/pyanaconda/ui/gui/spokes/welcome.py b/pyanaconda/ui/gui/spokes/welcome.py +index 4f1bfbc..3373f1d 100644 +--- a/pyanaconda/ui/gui/spokes/welcome.py ++++ b/pyanaconda/ui/gui/spokes/welcome.py +@@ -241,7 +241,7 @@ class WelcomeLanguageSpoke(StandaloneSpoke, LangLocaleHandler): + welcomeLabel = self.builder.get_object("welcomeLabel") + + welcomeLabel.set_text(_("WELCOME TO %(name)s %(version)s.") % +- {"name" : productName.upper(), "version" : productVersion}) # pylint: disable=no-member ++ {"name" : productName, "version" : productVersion}) # pylint: disable=no-member + + # Retranslate the language (filtering) entry's placeholder text + languageEntry = self.builder.get_object("languageEntry") +-- +2.23.0 + diff --git a/modify-arguments-parsing.patch b/modify-arguments-parsing.patch new file mode 100644 index 0000000..d52570b --- /dev/null +++ b/modify-arguments-parsing.patch @@ -0,0 +1,25 @@ +From aa21df647d18390124379026dbaf99741d336875 Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Fri, 19 Jun 2020 10:43:24 +0800 +Subject: [PATCH] modify arguments parsing + +--- + pyanaconda/argument_parsing.py | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/pyanaconda/argument_parsing.py b/pyanaconda/argument_parsing.py +index 6b2f40f..ce29587 100644 +--- a/pyanaconda/argument_parsing.py ++++ b/pyanaconda/argument_parsing.py +@@ -592,7 +592,7 @@ def getArgumentParser(version_string, boot_cmdline=None): + + # some defaults change based on cmdline flags + if boot_cmdline is not None: +- if "console" in boot_cmdline: ++ if "console" in boot_cmdline and "inst.text" in boot_cmdline: + ap.set_defaults(display_mode=DisplayModes.TUI) + + return ap +-- +2.23.0 + diff --git a/modify-default-timezone.patch b/modify-default-timezone.patch new file mode 100644 index 0000000..5eebc24 --- /dev/null +++ b/modify-default-timezone.patch @@ -0,0 +1,55 @@ +From 59a4b9b04388e327e135f3ab9893698e2b3f5a5d Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Fri, 19 Jun 2020 
11:44:31 +0800 +Subject: [PATCH] modify default timezone + +--- + pyanaconda/modules/timezone/installation.py | 4 ++-- + pyanaconda/modules/timezone/timezone.py | 2 +- + pyanaconda/ui/gui/spokes/datetime_spoke.py | 2 +- + 3 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/pyanaconda/modules/timezone/installation.py b/pyanaconda/modules/timezone/installation.py +index 6383df1..9c6ae40 100644 +--- a/pyanaconda/modules/timezone/installation.py ++++ b/pyanaconda/modules/timezone/installation.py +@@ -63,8 +63,8 @@ class ConfigureTimezoneTask(Task): + if not is_valid_timezone(self._timezone): + # this should never happen, but for pity's sake + log.warning("Timezone %s set in kickstart is not valid, " +- "falling back to default (America/New_York).", self._timezone) +- self._timezone = "America/New_York" ++ "falling back to default (Asia/Shanghai).", self._timezone) ++ self._timezone = "Asia/Shanghai" + + def _make_timezone_symlink(self): + """Create the symlink that actually defines timezone.""" +diff --git a/pyanaconda/modules/timezone/timezone.py b/pyanaconda/modules/timezone/timezone.py +index 0678072..db1cd18 100644 +--- a/pyanaconda/modules/timezone/timezone.py ++++ b/pyanaconda/modules/timezone/timezone.py +@@ -40,7 +40,7 @@ class TimezoneService(KickstartService): + def __init__(self): + super().__init__() + self.timezone_changed = Signal() +- self._timezone = "America/New_York" ++ self._timezone = "Asia/Shanghai" + + self.is_utc_changed = Signal() + self._is_utc = False +diff --git a/pyanaconda/ui/gui/spokes/datetime_spoke.py b/pyanaconda/ui/gui/spokes/datetime_spoke.py +index 00b1bd9..f01d245 100644 +--- a/pyanaconda/ui/gui/spokes/datetime_spoke.py ++++ b/pyanaconda/ui/gui/spokes/datetime_spoke.py +@@ -65,7 +65,7 @@ SERVER_POOL = 1 + SERVER_WORKING = 2 + SERVER_USE = 3 + +-DEFAULT_TZ = "America/New_York" ++DEFAULT_TZ = "Asia/Shanghai" + + SPLIT_NUMBER_SUFFIX_RE = re.compile(r'([^0-9]*)([-+])([0-9]+)') + +-- +2.23.0 + diff --git 
a/modify-interface-is-extended-in-Chinese-mode.patch b/modify-interface-is-extended-in-Chinese-mode.patch new file mode 100644 index 0000000..8b7470d --- /dev/null +++ b/modify-interface-is-extended-in-Chinese-mode.patch @@ -0,0 +1,25 @@ +From 1999c1bd693ee187cc699fd1f1d4ac77086373b9 Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Fri, 19 Jun 2020 09:26:00 +0800 +Subject: [PATCH] modify interface is extended in Chinese mode + +--- + po/zh_CN.po | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/po/zh_CN.po b/po/zh_CN.po +index be8cefe..7ee5511 100644 +--- a/po/zh_CN.po ++++ b/po/zh_CN.po +@@ -6429,7 +6429,7 @@ msgstr "搜索方式(_B):" + + #: pyanaconda/ui/gui/spokes/advanced_storage.glade:153 + msgid "Port / Target / LUN #" +-msgstr "端口 / 目标 / 逻辑单位数标示符 (LUN #) " ++msgstr "端口 / 目标 / LUN # " + + #: pyanaconda/ui/gui/spokes/advanced_storage.glade:154 + msgid "Target WWID" +-- +2.23.0 + diff --git a/modify-network-hostname-dot-illegal.patch b/modify-network-hostname-dot-illegal.patch new file mode 100644 index 0000000..bde92d4 --- /dev/null +++ b/modify-network-hostname-dot-illegal.patch @@ -0,0 +1,39 @@ +From 343751258b3a4b3e21cf52add5a9ddf600065835 Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Fri, 19 Jun 2020 11:48:25 +0800 +Subject: [PATCH] modify network hostname dot illegal + +--- + pyanaconda/core/regexes.py | 2 +- + pyanaconda/network.py | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/pyanaconda/core/regexes.py b/pyanaconda/core/regexes.py +index 63ab668..f86e403 100644 +--- a/pyanaconda/core/regexes.py ++++ b/pyanaconda/core/regexes.py +@@ -103,7 +103,7 @@ IPV4_NETMASK_WITH_ANCHORS = re.compile("^" + IPV4_NETMASK_WITHOUT_ANCHORS + "$") + # with a period, but it can end with one. + # This regex uses negative lookahead and lookback assertions to enforce the + # hyphen rules and make it way more confusing +-HOSTNAME_PATTERN_WITHOUT_ANCHORS = r'(?:(?!-)[A-Za-z0-9-]{1,63}(? 
+Date: Fri, 3 Jul 2020 11:11:46 +0200 +Subject: [PATCH] Create a new DBus structure for time sources + +Use the class TimeSourceData to represent a time source. +--- + pyanaconda/core/constants.py | 4 + + .../modules/common/structures/timezone.py | 84 +++++++++++++++++++ + 2 files changed, 88 insertions(+) + create mode 100644 pyanaconda/modules/common/structures/timezone.py + +diff --git a/pyanaconda/core/constants.py b/pyanaconda/core/constants.py +index 536529f4e0..5124f05b7f 100644 +--- a/pyanaconda/core/constants.py ++++ b/pyanaconda/core/constants.py +@@ -306,6 +306,10 @@ class SecretStatus(Enum): + # Window title text + WINDOW_TITLE_TEXT = N_("Anaconda Installer") + ++# Types of time sources. ++TIME_SOURCE_SERVER = "SERVER" ++TIME_SOURCE_POOL = "POOL" ++ + # NTP server checking + NTP_SERVER_OK = 0 + NTP_SERVER_NOK = 1 +diff --git a/pyanaconda/modules/common/structures/timezone.py b/pyanaconda/modules/common/structures/timezone.py +new file mode 100644 +index 0000000000..d18234f681 +--- /dev/null ++++ b/pyanaconda/modules/common/structures/timezone.py +@@ -0,0 +1,84 @@ ++# ++# DBus structures for the timezone data. ++# ++# Copyright (C) 2020 Red Hat, Inc. All rights reserved. ++# ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 2 of the License, or ++# (at your option) any later version. ++# ++# This program is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with this program. If not, see . 
++# ++from dasbus.structure import DBusData ++from dasbus.typing import * # pylint: disable=wildcard-import ++ ++from pyanaconda.core.constants import TIME_SOURCE_SERVER ++ ++__all__ = ["TimeSourceData"] ++ ++ ++class TimeSourceData(DBusData): ++ """Data for a time source.""" ++ ++ def __init__(self): ++ self._type = TIME_SOURCE_SERVER ++ self._hostname = "" ++ self._options = [] ++ ++ @property ++ def type(self) -> Str: ++ """Type of the time source. ++ ++ Supported values: ++ ++ SERVER A single NTP server ++ POOL A pool of NTP servers ++ ++ :return: a type of the time source ++ """ ++ return self._type ++ ++ @type.setter ++ def type(self, value: Str): ++ self._type = value ++ ++ @property ++ def hostname(self) -> Str: ++ """Name of the time server. ++ ++ For example: ++ ++ ntp.cesnet.cz ++ ++ :return: a host name ++ """ ++ return self._hostname ++ ++ @hostname.setter ++ def hostname(self, value: Str): ++ self._hostname = value ++ ++ @property ++ def options(self) -> List[Str]: ++ """Options of the time source. ++ ++ For example: ++ ++ nts, ntsport 1234, iburst ++ ++ See ``man chrony.conf``. ++ ++ :return: a list of options ++ """ ++ return self._options ++ ++ @options.setter ++ def options(self, value): ++ self._options = value +-- +2.23.0 diff --git a/ntp-servers-improve-002-Use-the-structure-for-time-sources-in-ntp-py.patch b/ntp-servers-improve-002-Use-the-structure-for-time-sources-in-ntp-py.patch new file mode 100644 index 0000000..0bf217c --- /dev/null +++ b/ntp-servers-improve-002-Use-the-structure-for-time-sources-in-ntp-py.patch @@ -0,0 +1,290 @@ +From a645a1b8d17310533ef2d9232855c1852558e2b8 Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Fri, 3 Jul 2020 12:04:04 +0200 +Subject: [PATCH] Use the structure for time sources in ntp.py + +Modify ntp.py to work with TimeSourceData instead of strings and clean up +its functions a little. 
+--- + pyanaconda/ntp.py | 174 +++++++++++++++++++--------------------------- + 1 file changed, 73 insertions(+), 101 deletions(-) + +diff --git a/pyanaconda/ntp.py b/pyanaconda/ntp.py +index 16eece65e4..1b74ac9433 100644 +--- a/pyanaconda/ntp.py ++++ b/pyanaconda/ntp.py +@@ -31,6 +31,7 @@ + from pyanaconda import isys + from pyanaconda.threading import threadMgr, AnacondaThread + from pyanaconda.core.constants import THREAD_SYNC_TIME_BASENAME ++from pyanaconda.modules.common.structures.timezone import TimeSourceData + + NTP_CONFIG_FILE = "/etc/chrony.conf" + +@@ -47,21 +48,18 @@ class NTPconfigError(Exception): + pass + + +-def ntp_server_working(server): +- """ +- Tries to do an NTP request to the $server (timeout may take some time). ++def ntp_server_working(server_hostname): ++ """Tries to do an NTP request to the server (timeout may take some time). + +- :param server: hostname or IP address of an NTP server +- :type server: string ++ :param server_hostname: a host name or an IP address of an NTP server ++ :type server_hostname: string + :return: True if the given server is reachable and working, False otherwise + :rtype: bool +- + """ +- + client = ntplib.NTPClient() + + try: +- client.request(server) ++ client.request(server_hostname) + except ntplib.NTPException: + return False + # address related error +@@ -75,118 +73,89 @@ def ntp_server_working(server): + return True + + +-def pools_servers_to_internal(pools, servers): +- ret = [] +- for pool in pools: +- ret.extend(SERVERS_PER_POOL * [pool]) +- ret.extend(servers) +- +- return ret +- +- +-def internal_to_pools_and_servers(pools_servers): +- server_nums = dict() +- pools = [] +- servers = [] +- +- for item in pools_servers: +- server_nums[item] = server_nums.get(item, 0) + 1 +- +- for item in server_nums.keys(): +- if server_nums[item] >= SERVERS_PER_POOL: +- pools.extend((server_nums[item] // SERVERS_PER_POOL) * [item]) +- servers.extend((server_nums[item] % SERVERS_PER_POOL) * [item]) +- else: +- 
servers.extend(server_nums[item] * [item]) ++def get_servers_from_config(conf_file_path=NTP_CONFIG_FILE): ++ """Get NTP servers from a configuration file. + +- return (pools, servers) +- +- +-def get_servers_from_config(conf_file_path=NTP_CONFIG_FILE, +- srv_regexp=SRV_LINE_REGEXP): +- """ + Goes through the chronyd's configuration file looking for lines starting + with 'server'. + ++ :param conf_file_path: a path to the chronyd's configuration file + :return: servers found in the chronyd's configuration +- :rtype: list +- ++ :rtype: a list of TimeSourceData instances + """ +- +- pools = list() +- servers = list() ++ servers = [] + + try: + with open(conf_file_path, "r") as conf_file: + for line in conf_file: +- match = srv_regexp.match(line) +- if match: +- if match.group(1) == "pool": +- pools.append(match.group(2)) +- else: +- servers.append(match.group(2)) ++ match = SRV_LINE_REGEXP.match(line) ++ ++ if not match: ++ continue ++ ++ server = TimeSourceData() ++ server.type = match.group(1).upper() ++ server.hostname = match.group(2) ++ server.options = ["iburst"] ++ servers.append(server) + + except IOError as ioerr: +- msg = "Cannot open config file %s for reading (%s)" % (conf_file_path, +- ioerr.strerror) +- raise NTPconfigError(msg) ++ msg = "Cannot open config file {} for reading ({})." ++ raise NTPconfigError(msg.format(conf_file_path, ioerr.strerror)) + +- return (pools, servers) ++ return servers + + +-def save_servers_to_config(pools, servers, conf_file_path=NTP_CONFIG_FILE, +- srv_regexp=SRV_LINE_REGEXP, out_file_path=None): +- """ ++def save_servers_to_config(servers, conf_file_path=NTP_CONFIG_FILE, out_file_path=None): ++ """Save NTP servers to a configuration file. ++ + Replaces the pools and servers defined in the chronyd's configuration file + with the given ones. If the out_file is not None, then it is used for the + resulting config. 
+ +- :type pools: iterable +- :type servers: iterable +- :param out_file_path: path to the file used for the resulting config +- ++ :param servers: a list of NTP servers and pools ++ :type servers: a list of TimeSourceData instances ++ :param conf_file_path: a path to the chronyd's configuration file ++ :param out_file_path: a path to the file used for the resulting config + """ ++ temp_path = None + + try: + old_conf_file = open(conf_file_path, "r") +- + except IOError as ioerr: +- msg = "Cannot open config file %s for reading (%s)" % (conf_file_path, +- ioerr.strerror) +- raise NTPconfigError(msg) ++ msg = "Cannot open config file {} for reading ({})." ++ raise NTPconfigError(msg.format(conf_file_path, ioerr.strerror)) + +- try: +- if out_file_path: ++ if out_file_path: ++ try: + new_conf_file = open(out_file_path, "w") +- else: +- (fildes, temp_path) = tempfile.mkstemp() +- new_conf_file = os.fdopen(fildes, "w") +- +- except IOError as ioerr: +- if out_file_path: +- msg = "Cannot open new config file %s "\ +- "for writing (%s)" % (out_file_path, ioerr.strerror) +- else: +- msg = "Cannot open temporary file %s "\ +- "for writing (%s)" % (temp_path, ioerr.strerror) +- +- raise NTPconfigError(msg) ++ except IOError as ioerr: ++ msg = "Cannot open new config file {} for writing ({})." ++ raise NTPconfigError(msg.format(out_file_path, ioerr.strerror)) ++ else: ++ try: ++ (fields, temp_path) = tempfile.mkstemp() ++ new_conf_file = os.fdopen(fields, "w") ++ except IOError as ioerr: ++ msg = "Cannot open temporary file {} for writing ({})." 
++ raise NTPconfigError(msg.format(temp_path, ioerr.strerror)) + + heading = "# These servers were defined in the installation:\n" + +- #write info about the origin of the following lines ++ # write info about the origin of the following lines + new_conf_file.write(heading) + +- #write new servers and pools +- for pool in pools: +- new_conf_file.write("pool " + pool + " iburst\n") +- ++ # write new servers and pools + for server in servers: +- new_conf_file.write("server " + server + " iburst\n") ++ args = [server.type.lower(), server.hostname] + server.options ++ line = " ".join(args) + "\n" ++ new_conf_file.write(line) + +- #copy non-server lines from the old config and skip our heading ++ new_conf_file.write("\n") ++ ++ # copy non-server lines from the old config and skip our heading + for line in old_conf_file: +- if not srv_regexp.match(line) and line != heading: ++ if not SRV_LINE_REGEXP.match(line) and line != heading: + new_conf_file.write(line) + + old_conf_file.close() +@@ -199,28 +168,27 @@ def save_servers_to_config(pools, servers, conf_file_path=NTP_CONFIG_FILE, + os.unlink(temp_path) + + except OSError as oserr: +- msg = "Cannot replace the old config with "\ +- "the new one (%s)" % (oserr.strerror) ++ msg = "Cannot replace the old config with the new one ({})." ++ raise NTPconfigError(msg.format(oserr.strerror)) + +- raise NTPconfigError(msg) + ++def _one_time_sync(server, callback=None): ++ """Synchronize the system time with a given NTP server. + +-def one_time_sync(server, callback=None): +- """ + Synchronize the system time with a given NTP server. Note that this + function is blocking and will not return until the time gets synced or + querying server fails (may take some time before timeouting). 
+ +- :param server: NTP server ++ :param server: an NTP server ++ :type server: an instance of TimeSourceData + :param callback: callback function to run after sync or failure + :type callback: a function taking one boolean argument (success) + :return: True if the sync was successful, False otherwise +- + """ + + client = ntplib.NTPClient() + try: +- results = client.request(server) ++ results = client.request(server.hostname) + isys.set_system_time(int(results.tx_time)) + success = True + except ntplib.NTPException: +@@ -235,22 +203,26 @@ def one_time_sync(server, callback=None): + + + def one_time_sync_async(server, callback=None): +- """ ++ """Asynchronously synchronize the system time with a given NTP server. ++ + Asynchronously synchronize the system time with a given NTP server. This + function is non-blocking it starts a new thread for synchronization and + returns. Use callback argument to specify the function called when the + new thread finishes if needed. + +- :param server: NTP server ++ :param server: an NTP server ++ :type server: an instance of TimeSourceData + :param callback: callback function to run after sync or failure + :type callback: a function taking one boolean argument (success) +- + """ ++ thread_name = "%s_%s" % (THREAD_SYNC_TIME_BASENAME, server.hostname) + +- thread_name = "%s_%s" % (THREAD_SYNC_TIME_BASENAME, server) ++ # syncing with the same server running + if threadMgr.get(thread_name): +- #syncing with the same server running + return + +- threadMgr.add(AnacondaThread(name=thread_name, target=one_time_sync, +- args=(server, callback))) ++ threadMgr.add(AnacondaThread( ++ name=thread_name, ++ target=_one_time_sync, ++ args=(server, callback) ++ )) +-- +2.23.0 diff --git a/ntp-servers-improve-003-Use-the-structure-for-time-sources-in-the-Timezone-module.patch b/ntp-servers-improve-003-Use-the-structure-for-time-sources-in-the-Timezone-module.patch new file mode 100644 index 0000000..ee6e626 --- /dev/null +++ 
b/ntp-servers-improve-003-Use-the-structure-for-time-sources-in-the-Timezone-module.patch @@ -0,0 +1,406 @@ +From 4bc1b7305199fffc78439ab1ad1cdb8272988d52 Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Fri, 3 Jul 2020 12:21:11 +0200 +Subject: [PATCH] Use the structure for time sources in the Timezone module + +Modify the Timezone module to work with the DBus structures for time sources +instead of the strings. Rename the DBus property NTPServers to TimeSources. +--- + pyanaconda/modules/timezone/installation.py | 14 ++- + pyanaconda/modules/timezone/timezone.py | 44 ++++--- + .../modules/timezone/timezone_interface.py | 34 +++--- + .../pyanaconda_tests/module_timezone_test.py | 107 ++++++++++++++---- + 4 files changed, 141 insertions(+), 58 deletions(-) + +diff --git a/pyanaconda/modules/timezone/installation.py b/pyanaconda/modules/timezone/installation.py +index 6383df1103..c3ea4d7179 100644 +--- a/pyanaconda/modules/timezone/installation.py ++++ b/pyanaconda/modules/timezone/installation.py +@@ -145,12 +145,14 @@ def run(self): + return + + chronyd_conf_path = os.path.normpath(self._sysroot + ntp.NTP_CONFIG_FILE) +- pools, servers = ntp.internal_to_pools_and_servers(self._ntp_servers) + + if os.path.exists(chronyd_conf_path): + log.debug("Modifying installed chrony configuration") + try: +- ntp.save_servers_to_config(pools, servers, conf_file_path=chronyd_conf_path) ++ ntp.save_servers_to_config( ++ self._ntp_servers, ++ conf_file_path=chronyd_conf_path ++ ) + except ntp.NTPconfigError as ntperr: + log.warning("Failed to save NTP configuration: %s", ntperr) + +@@ -160,9 +162,11 @@ def run(self): + log.debug("Creating chrony configuration based on the " + "configuration from installation environment") + try: +- ntp.save_servers_to_config(pools, servers, +- conf_file_path=ntp.NTP_CONFIG_FILE, +- out_file_path=chronyd_conf_path) ++ ntp.save_servers_to_config( ++ self._ntp_servers, ++ conf_file_path=ntp.NTP_CONFIG_FILE, ++ out_file_path=chronyd_conf_path ++ 
) + except ntp.NTPconfigError as ntperr: + log.warning("Failed to save NTP configuration without chrony package: %s", + ntperr) +diff --git a/pyanaconda/modules/timezone/timezone.py b/pyanaconda/modules/timezone/timezone.py +index 0678072978..ff89d1ea77 100644 +--- a/pyanaconda/modules/timezone/timezone.py ++++ b/pyanaconda/modules/timezone/timezone.py +@@ -18,10 +18,12 @@ + # Red Hat, Inc. + # + from pyanaconda.core.configuration.anaconda import conf ++from pyanaconda.core.constants import TIME_SOURCE_SERVER + from pyanaconda.core.dbus import DBus + from pyanaconda.core.signal import Signal + from pyanaconda.modules.common.base import KickstartService + from pyanaconda.modules.common.constants.services import TIMEZONE ++from pyanaconda.modules.common.structures.timezone import TimeSourceData + from pyanaconda.timezone import NTP_PACKAGE + from pyanaconda.modules.common.containers import TaskContainer + from pyanaconda.modules.common.structures.requirement import Requirement +@@ -48,8 +50,8 @@ def __init__(self): + self.ntp_enabled_changed = Signal() + self._ntp_enabled = True + +- self.ntp_servers_changed = Signal() +- self._ntp_servers = [] ++ self.time_sources_changed = Signal() ++ self._time_sources = [] + + # FIXME: temporary workaround until PAYLOAD module is available + self._ntp_excluded = False +@@ -70,7 +72,17 @@ def process_kickstart(self, data): + self.set_timezone(data.timezone.timezone) + self.set_is_utc(data.timezone.isUtc) + self.set_ntp_enabled(not data.timezone.nontp) +- self.set_ntp_servers(data.timezone.ntpservers) ++ ++ servers = [] ++ ++ for hostname in data.timezone.ntpservers: ++ server = TimeSourceData() ++ server.type = TIME_SOURCE_SERVER ++ server.hostname = hostname ++ server.options = ["iburst"] ++ servers.append(server) ++ ++ self.set_time_sources(servers) + + def setup_kickstart(self, data): + """Set up the kickstart data.""" +@@ -78,8 +90,12 @@ def setup_kickstart(self, data): + data.timezone.isUtc = self.is_utc + data.timezone.nontp 
= not self.ntp_enabled + +- if self.ntp_enabled: +- data.timezone.ntpservers = list(self.ntp_servers) ++ if not self.ntp_enabled: ++ return ++ ++ data.timezone.ntpservers = [ ++ server.hostname for server in self.time_sources ++ ] + + @property + def timezone(self): +@@ -115,15 +131,15 @@ def set_ntp_enabled(self, ntp_enabled): + log.debug("NTP is set to %s.", ntp_enabled) + + @property +- def ntp_servers(self): +- """Return a list of NTP servers.""" +- return self._ntp_servers ++ def time_sources(self): ++ """Return a list of time sources.""" ++ return self._time_sources + +- def set_ntp_servers(self, servers): +- """Set NTP servers.""" +- self._ntp_servers = list(servers) +- self.ntp_servers_changed.emit() +- log.debug("NTP servers are set to %s.", servers) ++ def set_time_sources(self, servers): ++ """Set time sources.""" ++ self._time_sources = list(servers) ++ self.time_sources_changed.emit() ++ log.debug("Time sources are set to: %s", servers) + + def collect_requirements(self): + """Return installation requirements for this module. +@@ -168,6 +184,6 @@ def install_with_tasks(self): + ConfigureNTPTask( + sysroot=conf.target.system_root, + ntp_enabled=self.ntp_enabled, +- ntp_servers=self.ntp_servers ++ ntp_servers=self.time_sources + ) + ] +diff --git a/pyanaconda/modules/timezone/timezone_interface.py b/pyanaconda/modules/timezone/timezone_interface.py +index 03c5003f1e..f36e0b3723 100644 +--- a/pyanaconda/modules/timezone/timezone_interface.py ++++ b/pyanaconda/modules/timezone/timezone_interface.py +@@ -17,13 +17,15 @@ + # License and may only be used or replicated with the express permission of + # Red Hat, Inc. 
+ # +-from pyanaconda.modules.common.constants.services import TIMEZONE +-from pyanaconda.modules.common.containers import TaskContainer + from dasbus.server.property import emits_properties_changed + from dasbus.typing import * # pylint: disable=wildcard-import +-from pyanaconda.modules.common.base import KickstartModuleInterface + from dasbus.server.interface import dbus_interface + ++from pyanaconda.modules.common.base import KickstartModuleInterface ++from pyanaconda.modules.common.constants.services import TIMEZONE ++from pyanaconda.modules.common.containers import TaskContainer ++from pyanaconda.modules.common.structures.timezone import TimeSourceData ++ + + @dbus_interface(TIMEZONE.interface_name) + class TimezoneInterface(KickstartModuleInterface): +@@ -34,7 +36,7 @@ def connect_signals(self): + self.watch_property("Timezone", self.implementation.timezone_changed) + self.watch_property("IsUTC", self.implementation.is_utc_changed) + self.watch_property("NTPEnabled", self.implementation.ntp_enabled_changed) +- self.watch_property("NTPServers", self.implementation.ntp_servers_changed) ++ self.watch_property("TimeSources", self.implementation.time_sources_changed) + + @property + def Timezone(self) -> Str: +@@ -91,22 +93,26 @@ def SetNTPEnabled(self, ntp_enabled: Bool): + self.implementation.set_ntp_enabled(ntp_enabled) + + @property +- def NTPServers(self) -> List[Str]: +- """A list of NTP servers. ++ def TimeSources(self) -> List[Structure]: ++ """A list of time sources. + +- :return: a list of servers ++ :return: a list of time source data ++ :rtype: a list of structures of the type TimeSourceData + """ +- return self.implementation.ntp_servers ++ return TimeSourceData.to_structure_list( ++ self.implementation.time_sources ++ ) + + @emits_properties_changed +- def SetNTPServers(self, servers: List[Str]): +- """Set the NTP servers. ++ def SetTimeSources(self, sources: List[Structure]): ++ """Set the time sources. 
+ +- Example: [ntp.cesnet.cz] +- +- :param servers: a list of servers ++ :param sources: a list of time sources ++ :type sources: a list of structures of the type TimeSourceData + """ +- self.implementation.set_ntp_servers(servers) ++ self.implementation.set_time_sources( ++ TimeSourceData.from_structure_list(sources) ++ ) + + def ConfigureNTPServiceEnablementWithTask(self, ntp_excluded: Bool) -> ObjPath: + """Enable or disable NTP service. +diff --git a/tests/nosetests/pyanaconda_tests/module_timezone_test.py b/tests/nosetests/pyanaconda_tests/module_timezone_test.py +index f991f1e992..bb751d6f4b 100644 +--- a/tests/nosetests/pyanaconda_tests/module_timezone_test.py ++++ b/tests/nosetests/pyanaconda_tests/module_timezone_test.py +@@ -23,8 +23,13 @@ + from shutil import copytree, copyfile + from unittest.mock import Mock, patch + ++from dasbus.structure import compare_data ++from dasbus.typing import * # pylint: disable=wildcard-import ++ ++from pyanaconda.core.constants import TIME_SOURCE_SERVER, TIME_SOURCE_POOL + from pyanaconda.modules.common.constants.services import TIMEZONE + from pyanaconda.modules.common.errors.installation import TimezoneConfigurationError ++from pyanaconda.modules.common.structures.timezone import TimeSourceData + from pyanaconda.modules.timezone.installation import ConfigureNTPTask, ConfigureTimezoneTask, \ + ConfigureNTPServiceEnablementTask + from pyanaconda.modules.common.structures.kickstart import KickstartReport +@@ -33,7 +38,7 @@ + from pyanaconda.ntp import NTP_CONFIG_FILE, NTPconfigError + from tests.nosetests.pyanaconda_tests import check_kickstart_interface, \ + patch_dbus_publish_object, PropertiesChangedCallback, check_task_creation, \ +- patch_dbus_get_proxy, check_task_creation_list ++ patch_dbus_get_proxy, check_task_creation_list, check_dbus_property + from pyanaconda.timezone import NTP_SERVICE + + +@@ -50,6 +55,14 @@ def setUp(self): + self.callback = PropertiesChangedCallback() + 
self.timezone_interface.PropertiesChanged.connect(self.callback) + ++ def _check_dbus_property(self, *args, **kwargs): ++ check_dbus_property( ++ self, ++ TIMEZONE, ++ self.timezone_interface, ++ *args, **kwargs ++ ) ++ + def kickstart_properties_test(self): + """Test kickstart properties.""" + self.assertEqual(self.timezone_interface.KickstartCommands, ["timezone"]) +@@ -76,12 +89,24 @@ def ntp_property_test(self): + self.assertEqual(self.timezone_interface.NTPEnabled, False) + self.callback.assert_called_once_with(TIMEZONE.interface_name, {'NTPEnabled': False}, []) + +- def ntp_servers_property_test(self): +- """Test the NTPServers property.""" +- self.timezone_interface.SetNTPServers(["ntp.cesnet.cz"]) +- self.assertEqual(self.timezone_interface.NTPServers, ["ntp.cesnet.cz"]) +- self.callback.assert_called_once_with( +- TIMEZONE.interface_name, {'NTPServers': ["ntp.cesnet.cz"]}, []) ++ def time_sources_property_test(self): ++ """Test the TimeSources property.""" ++ server = { ++ "type": get_variant(Str, TIME_SOURCE_SERVER), ++ "hostname": get_variant(Str, "ntp.cesnet.cz"), ++ "options": get_variant(List[Str], ["iburst"]), ++ } ++ ++ pool = { ++ "type": get_variant(Str, TIME_SOURCE_POOL), ++ "hostname": get_variant(Str, "0.fedora.pool.ntp.org"), ++ "options": get_variant(List[Str], []), ++ } ++ ++ self._check_dbus_property( ++ "TimeSources", ++ [server, pool] ++ ) + + def _test_kickstart(self, ks_in, ks_out): + check_kickstart_interface(self, self.timezone_interface, ks_in, ks_out) +@@ -162,10 +187,19 @@ def install_with_tasks_configured_test(self, publisher): + self.timezone_interface.SetNTPEnabled(False) + # --nontp and --ntpservers are mutually exclusive in kicstart but + # there is no such enforcement in the module so for testing this is ok +- self.timezone_interface.SetNTPServers([ +- "clock1.example.com", +- "clock2.example.com", +- ]) ++ ++ server = TimeSourceData() ++ server.type = TIME_SOURCE_SERVER ++ server.hostname = "clock1.example.com" ++ 
server.options = ["iburst"] ++ ++ pool = TimeSourceData() ++ pool.type = TIME_SOURCE_POOL ++ pool.hostname = "clock2.example.com" ++ ++ self.timezone_interface.SetTimeSources( ++ TimeSourceData.to_structure_list([server, pool]) ++ ) + + task_classes = [ + ConfigureTimezoneTask, +@@ -182,10 +216,9 @@ def install_with_tasks_configured_test(self, publisher): + # ConfigureNTPTask + obj = task_objs[1] + self.assertEqual(obj.implementation._ntp_enabled, False) +- self.assertEqual(obj.implementation._ntp_servers, [ +- "clock1.example.com", +- "clock2.example.com", +- ]) ++ self.assertEqual(len(obj.implementation._ntp_servers), 2) ++ self.assertTrue(compare_data(obj.implementation._ntp_servers[0], server)) ++ self.assertTrue(compare_data(obj.implementation._ntp_servers[1], pool)) + + @patch_dbus_publish_object + def configure_ntp_service_enablement_default_test(self, publisher): +@@ -354,13 +387,13 @@ class NTPTasksTestCase(unittest.TestCase): + + def ntp_task_success_test(self): + """Test the success cases for NTP setup D-Bus task.""" +- self._test_ntp_inputs(False, False, ["unique.ntp.server", "another.unique.server"]) +- self._test_ntp_inputs(False, True, ["unique.ntp.server", "another.unique.server"]) ++ self._test_ntp_inputs(False, False) ++ self._test_ntp_inputs(False, True) + + def ntp_overwrite_test(self): + """Test overwriting existing config for NTP setup D-Bus task.""" +- self._test_ntp_inputs(True, True, ["unique.ntp.server", "another.unique.server"]) +- self._test_ntp_inputs(True, False, ["unique.ntp.server", "another.unique.server"]) ++ self._test_ntp_inputs(True, True) ++ self._test_ntp_inputs(True, False) + + def ntp_save_failure_test(self): + """Test failure when saving NTP config in D-Bus task.""" +@@ -368,6 +401,25 @@ def ntp_save_failure_test(self): + self._test_ntp_exception(True) + self._test_ntp_exception(False) + ++ def _get_test_sources(self): ++ """Get a list of sources""" ++ server = TimeSourceData() ++ server.type = TIME_SOURCE_SERVER ++ 
server.hostname = "unique.ntp.server" ++ server.options = ["iburst"] ++ ++ pool = TimeSourceData() ++ pool.type = TIME_SOURCE_POOL ++ pool.hostname = "another.unique.server" ++ ++ return [server, pool] ++ ++ def _get_expected_lines(self): ++ return [ ++ "server unique.ntp.server iburst\n", ++ "pool another.unique.server\n" ++ ] ++ + @patch("pyanaconda.modules.timezone.installation.ntp.save_servers_to_config", + side_effect=NTPconfigError) + def _test_ntp_exception(self, make_chronyd, mock_save): +@@ -376,11 +428,14 @@ def _test_ntp_exception(self, make_chronyd, mock_save): + with self.assertLogs("anaconda.modules.timezone.installation", level="WARNING"): + self._execute_task(sysroot, True, ["ntp.example.com"]) + +- def _test_ntp_inputs(self, make_chronyd, ntp_enabled, ntp_servers): ++ def _test_ntp_inputs(self, make_chronyd, ntp_enabled): ++ ntp_servers = self._get_test_sources() ++ expected_lines = self._get_expected_lines() ++ + with tempfile.TemporaryDirectory() as sysroot: + self._setup_environment(sysroot, make_chronyd) + self._execute_task(sysroot, ntp_enabled, ntp_servers) +- self._validate_ntp_config(sysroot, make_chronyd, ntp_enabled, ntp_servers) ++ self._validate_ntp_config(sysroot, make_chronyd, ntp_enabled, expected_lines) + + def _setup_environment(self, sysroot, make_chronyd): + os.mkdir(sysroot + "/etc") +@@ -395,12 +450,14 @@ def _execute_task(self, sysroot, ntp_enabled, ntp_servers): + ) + task.run() + +- def _validate_ntp_config(self, sysroot, was_present, was_enabled, expected_servers): ++ def _validate_ntp_config(self, sysroot, was_present, was_enabled, expected_lines): + if was_enabled: + with open(sysroot + NTP_CONFIG_FILE) as fobj: +- all_lines = "\n".join(fobj.readlines()) +- for server in expected_servers: +- self.assertIn(server, all_lines) ++ all_lines = fobj.readlines() ++ ++ for line in expected_lines: ++ self.assertIn(line, all_lines) ++ + elif not was_present: + self.assertFalse(os.path.exists(sysroot + NTP_CONFIG_FILE)) + +-- 
+2.23.0 diff --git a/ntp-servers-improve-004-Use-the-structure-for-time-sources-in-anaconda-py.patch b/ntp-servers-improve-004-Use-the-structure-for-time-sources-in-anaconda-py.patch new file mode 100644 index 0000000..f1d8ca4 --- /dev/null +++ b/ntp-servers-improve-004-Use-the-structure-for-time-sources-in-anaconda-py.patch @@ -0,0 +1,113 @@ +From 19dea71f13d55d49e9dfbcc5d941afd5eb5d9e6d Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Fri, 3 Jul 2020 12:08:05 +0200 +Subject: [PATCH] Use the structure for time sources in anaconda.py + +Modify anaconda.py to work with TimeSourceData instead of strings. +--- + anaconda.py | 11 +-------- + pyanaconda/startup_utils.py | 45 +++++++++++++++++++++++++++---------- + 2 files changed, 34 insertions(+), 22 deletions(-) + +diff --git a/anaconda.py b/anaconda.py +index 1abdeb2e1a..d6bb57190c 100755 +--- a/anaconda.py ++++ b/anaconda.py +@@ -325,7 +325,6 @@ def setup_environment(): + + from pyanaconda import vnc + from pyanaconda import kickstart +- from pyanaconda import ntp + from pyanaconda import keyboard + # we are past the --version and --help shortcut so we can import display & + # startup_utils, which import Blivet, without slowing down anything critical +@@ -714,15 +713,7 @@ def _earlyExceptionHandler(ty, value, traceback): + geoloc.geoloc.refresh() + + # setup ntp servers and start NTP daemon if not requested otherwise +- if conf.system.can_set_time_synchronization: +- kickstart_ntpservers = timezone_proxy.NTPServers +- +- if kickstart_ntpservers: +- pools, servers = ntp.internal_to_pools_and_servers(kickstart_ntpservers) +- ntp.save_servers_to_config(pools, servers) +- +- if timezone_proxy.NTPEnabled: +- util.start_service("chronyd") ++ startup_utils.start_chronyd() + + # Finish the initialization of the setup on boot action. + # This should be done sooner and somewhere else once it is possible. 
+diff --git a/pyanaconda/startup_utils.py b/pyanaconda/startup_utils.py +index f08b19e11a..e53d5491c1 100644 +--- a/pyanaconda/startup_utils.py ++++ b/pyanaconda/startup_utils.py +@@ -17,29 +17,28 @@ + # License and may only be used or replicated with the express permission of + # Red Hat, Inc. + # +-from pyanaconda.core.configuration.anaconda import conf +-from pyanaconda.core.i18n import _ +- +-from pyanaconda.anaconda_loggers import get_stdout_logger, get_storage_logger, get_packaging_logger +-stdout_log = get_stdout_logger() +- +-from pyanaconda.anaconda_loggers import get_module_logger +-log = get_module_logger(__name__) +- + import sys + import time + import os ++import blivet + +-from pyanaconda.core import util, constants +-from pyanaconda import product ++from pyanaconda import product, ntp + from pyanaconda import anaconda_logging + from pyanaconda import network + from pyanaconda import safe_dbus + from pyanaconda import kickstart ++from pyanaconda.anaconda_loggers import get_stdout_logger, get_storage_logger, \ ++ get_packaging_logger, get_module_logger ++from pyanaconda.core import util, constants ++from pyanaconda.core.configuration.anaconda import conf ++from pyanaconda.core.i18n import _ + from pyanaconda.flags import flags + from pyanaconda.screensaver import inhibit_screensaver ++from pyanaconda.modules.common.structures.timezone import TimeSourceData ++from pyanaconda.modules.common.constants.services import TIMEZONE + +-import blivet ++stdout_log = get_stdout_logger() ++log = get_module_logger(__name__) + + + def gtk_warning(title, reason): +@@ -373,3 +372,25 @@ def parse_kickstart(ks, addon_paths, strict_mode=False): + kickstart.parseKickstart(ksdata, ks, strict_mode=strict_mode, pass_to_boss=True) + + return ksdata ++ ++ ++def start_chronyd(): ++ """Start the NTP daemon chronyd. ++ ++ Set up NTP servers and start NTP daemon if not requested otherwise. 
++ """ ++ if not conf.system.can_set_time_synchronization: ++ log.debug("Skip the time synchronization.") ++ return ++ ++ timezone_proxy = TIMEZONE.get_proxy() ++ enabled = timezone_proxy.NTPEnabled ++ servers = TimeSourceData.from_structure_list( ++ timezone_proxy.TimeSources ++ ) ++ ++ if servers: ++ ntp.save_servers_to_config(servers) ++ ++ if enabled: ++ util.start_service("chronyd") +-- +2.23.0 diff --git a/ntp-servers-improve-005-Use-the-structure-for-time-sources-in-network-py.patch b/ntp-servers-improve-005-Use-the-structure-for-time-sources-in-network-py.patch new file mode 100644 index 0000000..1776584 --- /dev/null +++ b/ntp-servers-improve-005-Use-the-structure-for-time-sources-in-network-py.patch @@ -0,0 +1,80 @@ +From 4635e846a98182901777ab6de492020082f313cb Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Fri, 3 Jul 2020 12:12:33 +0200 +Subject: [PATCH] Use the structure for time sources in network.py + +Modify network.py to work with TimeSourceData instead of strings. +--- + pyanaconda/network.py | 31 +++++++++++++++++++++---------- + 1 file changed, 21 insertions(+), 10 deletions(-) + +diff --git a/pyanaconda/network.py b/pyanaconda/network.py +index f8e9b19a15..bce57354b1 100644 +--- a/pyanaconda/network.py ++++ b/pyanaconda/network.py +@@ -16,14 +16,7 @@ + # + # You should have received a copy of the GNU General Public License + # along with this program. If not, see . 
+- +-import gi +-gi.require_version("NM", "1.0") +- +-from gi.repository import NM +- + import shutil +-from pyanaconda.core import util, constants + import socket + import itertools + import os +@@ -34,17 +27,24 @@ + + from dasbus.typing import get_native + ++from pyanaconda.anaconda_loggers import get_module_logger ++from pyanaconda.core import util, constants + from pyanaconda.core.i18n import _ + from pyanaconda.core.kernel import kernel_arguments + from pyanaconda.core.regexes import HOSTNAME_PATTERN_WITHOUT_ANCHORS, \ + IPV6_ADDRESS_IN_DRACUT_IP_OPTION + from pyanaconda.core.configuration.anaconda import conf ++from pyanaconda.core.constants import TIME_SOURCE_SERVER + from pyanaconda.modules.common.constants.services import NETWORK, TIMEZONE, STORAGE + from pyanaconda.modules.common.constants.objects import FCOE + from pyanaconda.modules.common.task import sync_run_task + from pyanaconda.modules.common.structures.network import NetworkDeviceInfo ++from pyanaconda.modules.common.structures.timezone import TimeSourceData ++ ++import gi ++gi.require_version("NM", "1.0") ++from gi.repository import NM + +-from pyanaconda.anaconda_loggers import get_module_logger + log = get_module_logger(__name__) + + DEFAULT_HOSTNAME = "localhost.localdomain" +@@ -347,9 +347,20 @@ def _set_ntp_servers_from_dhcp(): + hostnames.append(hostname) + + # check if some NTP servers were specified from kickstart +- if not timezone_proxy.NTPServers and conf.target.is_hardware: ++ if not timezone_proxy.TimeSources and conf.target.is_hardware: + # no NTP servers were specified, add those from DHCP +- timezone_proxy.SetNTPServers(hostnames) ++ servers = [] ++ ++ for hostname in hostnames: ++ server = TimeSourceData() ++ server.type = TIME_SOURCE_SERVER ++ server.hostname = hostname ++ server.options = ["iburst"] ++ servers.append(server) ++ ++ timezone_proxy.SetTimeSources( ++ TimeSourceData.to_structure_list(servers) ++ ) + + + def 
wait_for_connected_NM(timeout=constants.NETWORK_CONNECTION_TIMEOUT, only_connecting=False): +-- +2.23.0 diff --git a/ntp-servers-improve-006-Add-support-for-the-NTP-server-status-cache.patch b/ntp-servers-improve-006-Add-support-for-the-NTP-server-status-cache.patch new file mode 100644 index 0000000..1b97cd0 --- /dev/null +++ b/ntp-servers-improve-006-Add-support-for-the-NTP-server-status-cache.patch @@ -0,0 +1,120 @@ +From 06ed7b6cee7baf64cf83411645bfa52a05767b92 Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Mon, 6 Jul 2020 14:16:40 +0200 +Subject: [PATCH] Add support for the NTP server status cache + +Use the class NTPServerStatusCache to check the status of the given NTP server. +The cache remembers results of all checked host names. +--- + pyanaconda/ntp.py | 84 +++++++++++++++++++++++++++++++++++++++++++++-- + 1 file changed, 82 insertions(+), 2 deletions(-) + +diff --git a/pyanaconda/ntp.py b/pyanaconda/ntp.py +index 1b74ac9433..637d31f63e 100644 +--- a/pyanaconda/ntp.py ++++ b/pyanaconda/ntp.py +@@ -29,9 +29,12 @@ + import socket + + from pyanaconda import isys +-from pyanaconda.threading import threadMgr, AnacondaThread +-from pyanaconda.core.constants import THREAD_SYNC_TIME_BASENAME ++from pyanaconda.anaconda_loggers import get_module_logger ++from pyanaconda.core.i18n import N_, _ ++from pyanaconda.core.constants import THREAD_SYNC_TIME_BASENAME, NTP_SERVER_QUERY, \ ++ THREAD_NTP_SERVER_CHECK, NTP_SERVER_OK, NTP_SERVER_NOK + from pyanaconda.modules.common.structures.timezone import TimeSourceData ++from pyanaconda.threading import threadMgr, AnacondaThread + + NTP_CONFIG_FILE = "/etc/chrony.conf" + +@@ -42,6 +45,15 @@ + #treat pools as four servers with the same name + SERVERS_PER_POOL = 4 + ++# Description of an NTP server status. 
++NTP_SERVER_STATUS_DESCRIPTIONS = { ++ NTP_SERVER_OK: N_("status: working"), ++ NTP_SERVER_NOK: N_("status: not working"), ++ NTP_SERVER_QUERY: N_("checking status") ++} ++ ++log = get_module_logger(__name__) ++ + + class NTPconfigError(Exception): + """Exception class for NTP related problems""" +@@ -226,3 +238,71 @@ def one_time_sync_async(server, callback=None): + target=_one_time_sync, + args=(server, callback) + )) ++ ++ ++class NTPServerStatusCache(object): ++ """The cache of NTP server states.""" ++ ++ def __init__(self): ++ self._cache = {} ++ ++ def get_status(self, server): ++ """Get the status of the given NTP server. ++ ++ :param TimeSourceData server: an NTP server ++ :return int: a status of the NTP server ++ """ ++ return self._cache.get( ++ server.hostname, ++ NTP_SERVER_QUERY ++ ) ++ ++ def get_status_description(self, server): ++ """Get the status description of the given NTP server. ++ ++ :param TimeSourceData server: an NTP server ++ :return str: a status description of the NTP server ++ """ ++ status = self.get_status(server) ++ return _(NTP_SERVER_STATUS_DESCRIPTIONS[status]) ++ ++ def check_status(self, server): ++ """Asynchronously check if given NTP servers appear to be working. ++ ++ :param TimeSourceData server: an NTP server ++ """ ++ # Get a hostname. ++ hostname = server.hostname ++ ++ # Reset the current status. ++ self._set_status(hostname, NTP_SERVER_QUERY) ++ ++ # Start the check. ++ threadMgr.add(AnacondaThread( ++ prefix=THREAD_NTP_SERVER_CHECK, ++ target=self._check_status, ++ args=(hostname, )) ++ ) ++ ++ def _set_status(self, hostname, status): ++ """Set the status of the given NTP server. ++ ++ :param str hostname: a hostname of an NTP server ++ :return int: a status of the NTP server ++ """ ++ self._cache[hostname] = status ++ ++ def _check_status(self, hostname): ++ """Check if an NTP server appears to be working. 
++ ++ :param str hostname: a hostname of an NTP server ++ """ ++ log.debug("Checking NTP server %s", hostname) ++ result = ntp_server_working(hostname) ++ ++ if result: ++ log.debug("NTP server %s appears to be working.", hostname) ++ self._set_status(hostname, NTP_SERVER_OK) ++ else: ++ log.debug("NTP server %s appears not to be working.", hostname) ++ self._set_status(hostname, NTP_SERVER_NOK) +-- +2.23.0 diff --git a/ntp-servers-improve-007-Add-support-for-generating-a-summary-of-the-NTP-servers.patch b/ntp-servers-improve-007-Add-support-for-generating-a-summary-of-the-NTP-servers.patch new file mode 100644 index 0000000..721e007 --- /dev/null +++ b/ntp-servers-improve-007-Add-support-for-generating-a-summary-of-the-NTP-servers.patch @@ -0,0 +1,60 @@ +From 716db242314b710b881c073d290b6d1ad8670d36 Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Mon, 6 Jul 2020 14:19:46 +0200 +Subject: [PATCH] Add support for generating a summary of the NTP servers + +Call the functions get_ntp_server_summary and get_ntp_servers_summary to +generate a string with a summary of the specified NTP servers and their +states. +--- + pyanaconda/ntp.py | 35 +++++++++++++++++++++++++++++++++++ + 1 file changed, 35 insertions(+) + +diff --git a/pyanaconda/ntp.py b/pyanaconda/ntp.py +index 637d31f63e..eed4b34307 100644 +--- a/pyanaconda/ntp.py ++++ b/pyanaconda/ntp.py +@@ -60,6 +60,41 @@ class NTPconfigError(Exception): + pass + + ++def get_ntp_server_summary(server, states): ++ """Generate a summary of an NTP server and its status. ++ ++ :param server: an NTP server ++ :type server: an instance of TimeSourceData ++ :param states: a cache of NTP server states ++ :type states: an instance of NTPServerStatusCache ++ :return: a string with a summary ++ """ ++ return "{} ({})".format( ++ server.hostname, ++ states.get_status_description(server) ++ ) ++ ++ ++def get_ntp_servers_summary(servers, states): ++ """Generate a summary of NTP servers and their states. 
++ ++ :param servers: a list of NTP servers ++ :type servers: a list of TimeSourceData ++ :param states: a cache of NTP server states ++ :type states: an instance of NTPServerStatusCache ++ :return: a string with a summary ++ """ ++ summary = _("NTP servers:") ++ ++ for server in servers: ++ summary += "\n" + get_ntp_server_summary(server, states) ++ ++ if not servers: ++ summary += " " + _("not configured") ++ ++ return summary ++ ++ + def ntp_server_working(server_hostname): + """Tries to do an NTP request to the server (timeout may take some time). + +-- +2.23.0 diff --git a/ntp-servers-improve-008-Use-the-structure-for-time-sources-in-TUI.patch b/ntp-servers-improve-008-Use-the-structure-for-time-sources-in-TUI.patch new file mode 100644 index 0000000..0fd92ef --- /dev/null +++ b/ntp-servers-improve-008-Use-the-structure-for-time-sources-in-TUI.patch @@ -0,0 +1,468 @@ +From 8a10cee0ab94b844c65d1493b3d78df5210e4e34 Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Fri, 3 Jul 2020 14:41:37 +0200 +Subject: [PATCH] Use the structure for time sources in TUI + +Modify TUI to work with TimeSourceData instead of strings. +--- + pyanaconda/ui/tui/spokes/time_spoke.py | 286 ++++++++++--------------- + 1 file changed, 109 insertions(+), 177 deletions(-) + +diff --git a/pyanaconda/ui/tui/spokes/time_spoke.py b/pyanaconda/ui/tui/spokes/time_spoke.py +index b93ab41eec..b88a17960f 100644 +--- a/pyanaconda/ui/tui/spokes/time_spoke.py ++++ b/pyanaconda/ui/tui/spokes/time_spoke.py +@@ -16,7 +16,10 @@ + # License and may only be used or replicated with the express permission of + # Red Hat, Inc. 
+ # ++from pyanaconda.core.constants import TIME_SOURCE_SERVER + from pyanaconda.modules.common.constants.services import TIMEZONE ++from pyanaconda.modules.common.structures.timezone import TimeSourceData ++from pyanaconda.ntp import NTPServerStatusCache + from pyanaconda.ui.categories.localization import LocalizationCategory + from pyanaconda.ui.tui.spokes import NormalTUISpoke + from pyanaconda.ui.common import FirstbootSpokeMixIn +@@ -24,11 +27,9 @@ + from pyanaconda import ntp + from pyanaconda.core import constants + from pyanaconda.core.i18n import N_, _, C_ +-from pyanaconda.threading import threadMgr, AnacondaThread + from pyanaconda.flags import flags + +-from collections import OrderedDict, namedtuple +-from threading import RLock ++from collections import namedtuple + + from simpleline.render.containers import ListColumnContainer + from simpleline.render.screen import InputState +@@ -39,22 +40,10 @@ + from pyanaconda.anaconda_loggers import get_module_logger + log = get_module_logger(__name__) + +-CallbackTimezoneArgs = namedtuple("CallbackTimezoneArgs", ["region", "timezone"]) +- +- +-def format_ntp_status_list(servers): +- ntp_server_states = { +- constants.NTP_SERVER_OK: _("status: working"), +- constants.NTP_SERVER_NOK: _("status: not working"), +- constants.NTP_SERVER_QUERY: _("checking status") +- } +- status_list = [] +- for server, server_state in servers.items(): +- status_list.append("%s (%s)" % (server, ntp_server_states[server_state])) +- return status_list ++__all__ = ["TimeSpoke"] + + +-__all__ = ["TimeSpoke"] ++CallbackTimezoneArgs = namedtuple("CallbackTimezoneArgs", ["region", "timezone"]) + + + class TimeSpoke(FirstbootSpokeMixIn, NormalTUISpoke): +@@ -66,10 +55,8 @@ def __init__(self, data, storage, payload): + self.title = N_("Time settings") + self._timezone_spoke = None + self._container = None +- # we use an ordered dict to keep the NTP server insertion order +- self._ntp_servers = OrderedDict() +- self._ntp_servers_lock = RLock() 
+- ++ self._ntp_servers = [] ++ self._ntp_servers_states = NTPServerStatusCache() + self._timezone_module = TIMEZONE.get_proxy() + + @property +@@ -83,103 +70,24 @@ def initialize(self): + # during the installation + # - from config files when running in Initial Setup + # after the installation +- ntp_servers = [] +- + if constants.ANACONDA_ENVIRON in flags.environs: +- ntp_servers = self._timezone_module.NTPServers ++ self._ntp_servers = TimeSourceData.from_structure_list( ++ self._timezone_module.TimeSources ++ ) + elif constants.FIRSTBOOT_ENVIRON in flags.environs: +- ntp_servers = ntp.get_servers_from_config()[1] # returns a (NPT pools, NTP servers) tupple ++ self._ntp_servers = ntp.get_servers_from_config() + else: + log.error("tui time spoke: unsupported environment configuration %s," + "can't decide where to get initial NTP servers", flags.environs) + +- # check if the NTP servers appear to be working or not +- if ntp_servers: +- for server in ntp_servers: +- self._ntp_servers[server] = constants.NTP_SERVER_QUERY +- +- # check if the newly added NTP servers work fine +- self._check_ntp_servers_async(self._ntp_servers.keys()) ++ # check if the newly added NTP servers work fine ++ for server in self._ntp_servers: ++ self._ntp_servers_states.check_status(server) + + # we assume that the NTP spoke is initialized enough even if some NTP + # server check threads might still be running + self.initialize_done() + +- def _check_ntp_servers_async(self, servers): +- """Asynchronously check if given NTP servers appear to be working. +- +- :param list servers: list of servers to check +- """ +- for server in servers: +- threadMgr.add(AnacondaThread(prefix=constants.THREAD_NTP_SERVER_CHECK, +- target=self._check_ntp_server, +- args=(server,))) +- +- def _check_ntp_server(self, server): +- """Check if an NTP server appears to be working. 
+- +- :param str server: NTP server address +- :returns: True if the server appears to be working, False if not +- :rtype: bool +- """ +- log.debug("checking NTP server %s", server) +- result = ntp.ntp_server_working(server) +- if result: +- log.debug("NTP server %s appears to be working", server) +- self.set_ntp_server_status(server, constants.NTP_SERVER_OK) +- else: +- log.debug("NTP server %s appears not to be working", server) +- self.set_ntp_server_status(server, constants.NTP_SERVER_NOK) +- +- @property +- def ntp_servers(self): +- """Return a list of NTP servers known to the Time spoke. +- +- :returns: a list of NTP servers +- :rtype: list of strings +- """ +- return self._ntp_servers +- +- def add_ntp_server(self, server): +- """Add NTP server address to our internal NTP server tracking dictionary. +- +- :param str server: NTP server address to add +- """ +- # the add & remove operations should (at least at the moment) be never +- # called from different threads at the same time, but lets just use +- # a lock there when we are at it +- with self._ntp_servers_lock: +- if server not in self._ntp_servers: +- self._ntp_servers[server] = constants.NTP_SERVER_QUERY +- self._check_ntp_servers_async([server]) +- +- def remove_ntp_server(self, server): +- """Remove NTP server address from our internal NTP server tracking dictionary. +- +- :param str server: NTP server address to remove +- """ +- # the remove-server and set-server-status operations need to be atomic, +- # so that we avoid reintroducing removed servers by setting their status +- with self._ntp_servers_lock: +- if server in self._ntp_servers: +- del self._ntp_servers[server] +- +- def set_ntp_server_status(self, server, status): +- """Set status for an NTP server in the NTP server dict. +- +- The status can be "working", "not working" or "check in progress", +- and is defined by three constants defined in constants.py. 
+- +- :param str server: an NTP server +- :param int status: status of the NTP server +- """ +- +- # the remove-server and set-server-status operations need to be atomic, +- # so that we avoid reintroducing removed server by setting their status +- with self._ntp_servers_lock: +- if server in self._ntp_servers: +- self._ntp_servers[server] = status +- + @property + def timezone_spoke(self): + if not self._timezone_spoke: +@@ -210,6 +118,7 @@ def _summary_text(self): + :rtype: str + """ + msg = "" ++ + # timezone + kickstart_timezone = self._timezone_module.Timezone + timezone_msg = _("not set") +@@ -222,12 +131,10 @@ def _summary_text(self): + msg += "\n" + + # NTP +- msg += _("NTP servers:") +- if self._ntp_servers: +- for status in format_ntp_status_list(self._ntp_servers): +- msg += "\n%s" % status +- else: +- msg += _("not configured") ++ msg += ntp.get_ntp_servers_summary( ++ self._ntp_servers, ++ self._ntp_servers_states ++ ) + + return msg + +@@ -244,8 +151,15 @@ def refresh(self, args=None): + + self._container = ListColumnContainer(1, columns_width=78, spacing=1) + +- self._container.add(TextWidget(timezone_option), callback=self._timezone_callback) +- self._container.add(TextWidget(_("Configure NTP servers")), callback=self._configure_ntp_server_callback) ++ self._container.add( ++ TextWidget(timezone_option), ++ callback=self._timezone_callback ++ ) ++ ++ self._container.add( ++ TextWidget(_("Configure NTP servers")), ++ callback=self._configure_ntp_server_callback ++ ) + + self.window.add_with_separator(self._container) + +@@ -254,7 +168,13 @@ def _timezone_callback(self, data): + self.close() + + def _configure_ntp_server_callback(self, data): +- new_spoke = NTPServersSpoke(self.data, self.storage, self.payload, self) ++ new_spoke = NTPServersSpoke( ++ self.data, ++ self.storage, ++ self.payload, ++ self._ntp_servers, ++ self._ntp_servers_states ++ ) + ScreenHandler.push_screen_modal(new_spoke) + self.apply() + self.close() +@@ -268,7 +188,9 @@ def 
input(self, args, key): + + def apply(self): + # update the NTP server list in kickstart +- self._timezone_module.SetNTPServers(list(self.ntp_servers.keys())) ++ self._timezone_module.SetTimeSources( ++ TimeSourceData.to_structure_list(self._ntp_servers) ++ ) + + + class TimeZoneSpoke(NormalTUISpoke): +@@ -375,49 +297,55 @@ def apply(self): + class NTPServersSpoke(NormalTUISpoke): + category = LocalizationCategory + +- def __init__(self, data, storage, payload, time_spoke): ++ def __init__(self, data, storage, payload, servers, states): + super().__init__(data, storage, payload) + self.title = N_("NTP configuration") + self._container = None +- self._time_spoke = time_spoke ++ self._servers = servers ++ self._states = states + + @property + def indirect(self): + return True + +- def _summary_text(self): +- """Return summary of NTP configuration.""" +- msg = _("NTP servers:") +- if self._time_spoke.ntp_servers: +- for status in format_ntp_status_list(self._time_spoke.ntp_servers): +- msg += "\n%s" % status +- else: +- msg += _("no NTP servers have been configured") +- return msg +- + def refresh(self, args=None): + super().refresh(args) + +- summary = self._summary_text() ++ summary = ntp.get_ntp_servers_summary( ++ self._servers, ++ self._states ++ ) ++ + self.window.add_with_separator(TextWidget(summary)) + + self._container = ListColumnContainer(1, columns_width=78, spacing=1) +- + self._container.add(TextWidget(_("Add NTP server")), self._add_ntp_server) + + # only add the remove option when we can remove something +- if self._time_spoke.ntp_servers: ++ if self._servers: + self._container.add(TextWidget(_("Remove NTP server")), self._remove_ntp_server) + + self.window.add_with_separator(self._container) + + def _add_ntp_server(self, data): +- new_spoke = AddNTPServerSpoke(self.data, self.storage, self.payload, self._time_spoke) ++ new_spoke = AddNTPServerSpoke( ++ self.data, ++ self.storage, ++ self.payload, ++ self._servers, ++ self._states ++ ) + 
ScreenHandler.push_screen_modal(new_spoke) + self.redraw() + + def _remove_ntp_server(self, data): +- new_spoke = RemoveNTPServerSpoke(self.data, self.storage, self.payload, self._time_spoke) ++ new_spoke = RemoveNTPServerSpoke( ++ self.data, ++ self.storage, ++ self.payload, ++ self._servers, ++ self._states ++ ) + ScreenHandler.push_screen_modal(new_spoke) + self.redraw() + +@@ -434,12 +362,12 @@ def apply(self): + class AddNTPServerSpoke(NormalTUISpoke): + category = LocalizationCategory + +- def __init__(self, data, storage, payload, time_spoke): ++ def __init__(self, data, storage, payload, servers, states): + super().__init__(data, storage, payload) + self.title = N_("Add NTP server address") +- self._time_spoke = time_spoke +- self._new_ntp_server = None +- self.value = None ++ self._servers = servers ++ self._states = states ++ self._value = None + + @property + def indirect(self): +@@ -447,76 +375,80 @@ def indirect(self): + + def refresh(self, args=None): + super().refresh(args) +- self.value = None ++ self._value = None + + def prompt(self, args=None): + # the title is enough, no custom prompt is needed +- if self.value is None: # first run or nothing entered ++ if self._value is None: # first run or nothing entered + return Prompt(_("Enter an NTP server address and press %s") % Prompt.ENTER) + + # an NTP server address has been entered +- self._new_ntp_server = self.value ++ self._add_ntp_server(self._value) + +- self.apply() + self.close() + ++ def _add_ntp_server(self, server_hostname): ++ for server in self._servers: ++ if server.hostname == server_hostname: ++ return ++ ++ server = TimeSourceData() ++ server.type = TIME_SOURCE_SERVER ++ server.hostname = server_hostname ++ server.options = ["iburst"] ++ ++ self._servers.append(server) ++ self._states.check_status(server) ++ + def input(self, args, key): + # we accept any string as NTP server address, as we do an automatic + # working/not-working check on the address later +- self.value = key ++ 
self._value = key + return InputState.DISCARDED + + def apply(self): +- if self._new_ntp_server: +- self._time_spoke.add_ntp_server(self._new_ntp_server) ++ pass + + + class RemoveNTPServerSpoke(NormalTUISpoke): + category = LocalizationCategory + +- def __init__(self, data, storage, payload, timezone_spoke): ++ def __init__(self, data, storage, payload, servers, states): + super().__init__(data, storage, payload) + self.title = N_("Select an NTP server to remove") +- self._time_spoke = timezone_spoke +- self._ntp_server_index = None ++ self._servers = servers ++ self._states = states ++ self._container = None + + @property + def indirect(self): + return True + +- def _summary_text(self): +- """Return a numbered listing of NTP servers.""" +- msg = "" +- for index, status in enumerate(format_ntp_status_list(self._time_spoke.ntp_servers), start=1): +- msg += "%d) %s" % (index, status) +- if index < len(self._time_spoke.ntp_servers): +- msg += "\n" +- return msg +- + def refresh(self, args=None): + super().refresh(args) +- summary = self._summary_text() +- self.window.add_with_separator(TextWidget(summary)) ++ self._container = ListColumnContainer(1) + +- def input(self, args, key): +- try: +- num = int(key) +- except ValueError: +- return super().input(args, key) ++ for server in self._servers: ++ description = ntp.get_ntp_server_summary( ++ server, self._states ++ ) + +- # we expect a number corresponding to one of the NTP servers +- # in the listing - the server corresponding to the number will be +- # removed from the NTP server tracking (ordered) dict +- if num > 0 and num <= len(self._time_spoke.ntp_servers): +- self._ntp_server_index = num - 1 +- self.apply() ++ self._container.add( ++ TextWidget(description), ++ self._remove_ntp_server, ++ server ++ ) ++ ++ self.window.add_with_separator(self._container) ++ ++ def _remove_ntp_server(self, server): ++ self._servers.remove(server) ++ ++ def input(self, args, key): ++ if self._container.process_user_input(key): + 
return InputState.PROCESSED_AND_CLOSE +- else: +- # the user enter a number that is out of range of the +- # available NTP servers, ignore it and stay in spoke +- return InputState.DISCARDED ++ ++ return super().input(args, key) + + def apply(self): +- if self._ntp_server_index is not None: +- ntp_server_address = list(self._time_spoke.ntp_servers.keys())[self._ntp_server_index] +- self._time_spoke.remove_ntp_server(ntp_server_address) ++ pass +-- +2.23.0 diff --git a/ntp-servers-improve-009-Use-the-structure-for-time-sources-in-GUI.patch b/ntp-servers-improve-009-Use-the-structure-for-time-sources-in-GUI.patch new file mode 100644 index 0000000..304e4d2 --- /dev/null +++ b/ntp-servers-improve-009-Use-the-structure-for-time-sources-in-GUI.patch @@ -0,0 +1,620 @@ +From 15d2b2fb568df2c1a77cfb2baa703ae9f3da0f30 Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Fri, 3 Jul 2020 13:38:10 +0200 +Subject: [PATCH] Use the structure for time sources in GUI + +Modify GUI to work with TimeSourceData instead of strings. +--- + pyanaconda/ui/gui/spokes/datetime_spoke.glade | 4 + + pyanaconda/ui/gui/spokes/datetime_spoke.py | 385 +++++++++--------- + 2 files changed, 187 insertions(+), 202 deletions(-) + +diff --git a/pyanaconda/ui/gui/spokes/datetime_spoke.glade b/pyanaconda/ui/gui/spokes/datetime_spoke.glade +index 37c7c6edc0..49e33776f5 100644 +--- a/pyanaconda/ui/gui/spokes/datetime_spoke.glade ++++ b/pyanaconda/ui/gui/spokes/datetime_spoke.glade +@@ -87,6 +87,8 @@ + + + ++ ++ + + + +@@ -242,6 +244,8 @@ + + True + ++ ++ + + + 0 +diff --git a/pyanaconda/ui/gui/spokes/datetime_spoke.py b/pyanaconda/ui/gui/spokes/datetime_spoke.py +index 00b1bd9d56..ea121e7e4d 100644 +--- a/pyanaconda/ui/gui/spokes/datetime_spoke.py ++++ b/pyanaconda/ui/gui/spokes/datetime_spoke.py +@@ -16,47 +16,48 @@ + # License and may only be used or replicated with the express permission of + # Red Hat, Inc. 
+ # ++import datetime ++import re ++import time ++import locale as locale_mod ++import functools ++import copy + ++from pyanaconda import isys ++from pyanaconda import network ++from pyanaconda import ntp ++from pyanaconda import flags + from pyanaconda.anaconda_loggers import get_module_logger +-log = get_module_logger(__name__) +- +-import gi +-gi.require_version("Gdk", "3.0") +-gi.require_version("Gtk", "3.0") +-gi.require_version("TimezoneMap", "1.0") +- +-from gi.repository import Gdk, Gtk, TimezoneMap +- ++from pyanaconda.core import util, constants ++from pyanaconda.core.async_utils import async_action_wait, async_action_nowait ++from pyanaconda.core.configuration.anaconda import conf ++from pyanaconda.core.constants import TIME_SOURCE_POOL, TIME_SOURCE_SERVER ++from pyanaconda.core.i18n import _, CN_ ++from pyanaconda.core.timer import Timer ++from pyanaconda.localization import get_xlated_timezone, resolve_date_format ++from pyanaconda.modules.common.structures.timezone import TimeSourceData ++from pyanaconda.modules.common.constants.services import TIMEZONE, NETWORK ++from pyanaconda.ntp import NTPServerStatusCache + from pyanaconda.ui.communication import hubQ + from pyanaconda.ui.common import FirstbootSpokeMixIn + from pyanaconda.ui.gui import GUIObject + from pyanaconda.ui.gui.spokes import NormalSpoke + from pyanaconda.ui.categories.localization import LocalizationCategory +-from pyanaconda.ui.gui.utils import gtk_call_once, override_cell_property ++from pyanaconda.ui.gui.utils import override_cell_property + from pyanaconda.ui.gui.utils import blockedHandler + from pyanaconda.ui.gui.helpers import GUIDialogInputCheckHandler + from pyanaconda.ui.helpers import InputCheck +- +-from pyanaconda.core import util, constants +-from pyanaconda.core.configuration.anaconda import conf +-from pyanaconda import isys +-from pyanaconda import network +-from pyanaconda import ntp +-from pyanaconda import flags +-from pyanaconda.modules.common.constants.services 
import TIMEZONE, NETWORK +-from pyanaconda.threading import threadMgr, AnacondaThread +-from pyanaconda.core.i18n import _, CN_ +-from pyanaconda.core.async_utils import async_action_wait, async_action_nowait + from pyanaconda.timezone import NTP_SERVICE, get_all_regions_and_timezones, get_timezone, is_valid_timezone +-from pyanaconda.localization import get_xlated_timezone, resolve_date_format +-from pyanaconda.core.timer import Timer ++from pyanaconda.threading import threadMgr, AnacondaThread + +-import datetime +-import re +-import threading +-import time +-import locale as locale_mod +-import functools ++import gi ++gi.require_version("Gdk", "3.0") ++gi.require_version("Gtk", "3.0") ++gi.require_version("TimezoneMap", "1.0") ++ ++from gi.repository import Gdk, Gtk, TimezoneMap ++ ++log = get_module_logger(__name__) + + __all__ = ["DatetimeSpoke"] + +@@ -64,6 +65,7 @@ + SERVER_POOL = 1 + SERVER_WORKING = 2 + SERVER_USE = 3 ++SERVER_OBJECT = 4 + + DEFAULT_TZ = "Asia/Shanghai" + +@@ -156,97 +158,49 @@ def _new_date_field_box(store): + return (box, combo, suffix_label) + + +-class NTPconfigDialog(GUIObject, GUIDialogInputCheckHandler): ++class NTPConfigDialog(GUIObject, GUIDialogInputCheckHandler): + builderObjects = ["ntpConfigDialog", "addImage", "serversStore"] + mainWidgetName = "ntpConfigDialog" + uiFile = "spokes/datetime_spoke.glade" + +- def __init__(self, data, timezone_module): ++ def __init__(self, data, servers, states): + GUIObject.__init__(self, data) ++ self._servers = servers ++ self._active_server = None ++ self._states = states + + # Use GUIDIalogInputCheckHandler to manipulate the sensitivity of the + # add button, and check for valid input in on_entry_activated + add_button = self.builder.get_object("addButton") + GUIDialogInputCheckHandler.__init__(self, add_button) + +- #epoch is increased when serversStore is repopulated +- self._epoch = 0 +- self._epoch_lock = threading.Lock() +- self._timezone_module = timezone_module +- +- @property +- 
def working_server(self): +- for row in self._serversStore: +- if row[SERVER_WORKING] == constants.NTP_SERVER_OK and row[SERVER_USE]: +- #server is checked and working +- return row[SERVER_HOSTNAME] +- +- return None +- +- @property +- def pools_servers(self): +- pools = list() +- servers = list() +- +- for used_row in (row for row in self._serversStore if row[SERVER_USE]): +- if used_row[SERVER_POOL]: +- pools.append(used_row[SERVER_HOSTNAME]) +- else: +- servers.append(used_row[SERVER_HOSTNAME]) +- +- return (pools, servers) +- +- def _render_working(self, column, renderer, model, itr, user_data=None): +- value = model[itr][SERVER_WORKING] +- +- if value == constants.NTP_SERVER_QUERY: +- return "dialog-question" +- elif value == constants.NTP_SERVER_OK: +- return "emblem-default" +- else: +- return "dialog-error" +- +- def initialize(self): + self.window.set_size_request(500, 400) + +- workingColumn = self.builder.get_object("workingColumn") +- workingRenderer = self.builder.get_object("workingRenderer") +- override_cell_property(workingColumn, workingRenderer, "icon-name", +- self._render_working) ++ working_column = self.builder.get_object("workingColumn") ++ working_renderer = self.builder.get_object("workingRenderer") ++ override_cell_property(working_column, working_renderer, "icon-name", self._render_working) + + self._serverEntry = self.builder.get_object("serverEntry") + self._serversStore = self.builder.get_object("serversStore") +- + self._addButton = self.builder.get_object("addButton") +- + self._poolCheckButton = self.builder.get_object("poolCheckButton") + +- # Validate the server entry box +- self._serverCheck = self.add_check(self._serverEntry, self._validateServer) ++ self._serverCheck = self.add_check(self._serverEntry, self._validate_server) + self._serverCheck.update_check_status() + +- self._initialize_store_from_config() +- +- def _initialize_store_from_config(self): +- self._serversStore.clear() ++ self._update_timer = Timer() + +- 
kickstart_ntp_servers = self._timezone_module.NTPServers ++ def _render_working(self, column, renderer, model, itr, user_data=None): ++ value = self._serversStore[itr][SERVER_WORKING] + +- if kickstart_ntp_servers: +- pools, servers = ntp.internal_to_pools_and_servers(kickstart_ntp_servers) ++ if value == constants.NTP_SERVER_QUERY: ++ return "dialog-question" ++ elif value == constants.NTP_SERVER_OK: ++ return "emblem-default" + else: +- try: +- pools, servers = ntp.get_servers_from_config() +- except ntp.NTPconfigError: +- log.warning("Failed to load NTP servers configuration") +- return +- +- for pool in pools: +- self._add_server(pool, True) +- for server in servers: +- self._add_server(server, False) ++ return "dialog-error" + +- def _validateServer(self, inputcheck): ++ def _validate_server(self, inputcheck): + server = self.get_input(inputcheck.input_obj) + + # If not set, fail the check to keep the button insensitive, but don't +@@ -261,108 +215,97 @@ def _validateServer(self, inputcheck): + return InputCheck.CHECK_OK + + def refresh(self): +- self._initialize_store_from_config() +- self._serverEntry.grab_focus() ++ # Update the store. ++ self._serversStore.clear() + +- def refresh_servers_state(self): +- itr = self._serversStore.get_iter_first() +- while itr: +- self._refresh_server_working(itr) +- itr = self._serversStore.iter_next(itr) ++ for server in self._servers: ++ self._add_row(server) ++ ++ # Start to update the status. ++ self._update_timer.timeout_sec(1, self._update_rows) ++ ++ # Focus on the server entry. ++ self._serverEntry.grab_focus() + + def run(self): + self.window.show() + rc = self.window.run() + self.window.hide() + +- #OK clicked ++ # OK clicked + if rc == 1: +- new_pools, new_servers = self.pools_servers ++ # Remove servers. ++ for row in self._serversStore: ++ if not row[SERVER_USE]: ++ server = row[SERVER_OBJECT] ++ self._servers.remove(server) + ++ # Restart the NTP service. 
+ if conf.system.can_set_time_synchronization: +- ntp.save_servers_to_config(new_pools, new_servers) ++ ntp.save_servers_to_config(self._servers) + util.restart_service(NTP_SERVICE) + +- #Cancel clicked, window destroyed... +- else: +- self._epoch_lock.acquire() +- self._epoch += 1 +- self._epoch_lock.release() +- + return rc + +- def _set_server_ok_nok(self, itr, epoch_started): +- """ +- If the server is working, set its data to NTP_SERVER_OK, otherwise set its +- data to NTP_SERVER_NOK. +- +- :param itr: iterator of the $server's row in the self._serversStore ++ def _add_row(self, server): ++ """Add a new row for the given NTP server. + ++ :param server: an NTP server ++ :type server: an instance of TimeSourceData + """ ++ itr = self._serversStore.append([ ++ "", ++ False, ++ constants.NTP_SERVER_QUERY, ++ True, ++ server ++ ]) ++ ++ self._refresh_row(itr) ++ ++ def _refresh_row(self, itr): ++ """Refresh the given row.""" ++ server = self._serversStore[itr][SERVER_OBJECT] ++ self._serversStore.set_value(itr, SERVER_HOSTNAME, server.hostname) ++ self._serversStore.set_value(itr, SERVER_POOL, server.type == TIME_SOURCE_POOL) ++ ++ def _update_rows(self): ++ """Periodically update the status of all rows. ++ ++ :return: True to repeat, otherwise False ++ """ ++ for row in self._serversStore: ++ server = row[SERVER_OBJECT] + +- @async_action_nowait +- def set_store_value(arg_tuple): +- """ +- We need a function for this, because this way it can be added to +- the MainLoop with thread-safe async_action_nowait (but only with one +- argument). 
+- +- :param arg_tuple: (store, itr, column, value) +- +- """ +- +- (store, itr, column, value) = arg_tuple +- store.set_value(itr, column, value) +- +- orig_hostname = self._serversStore[itr][SERVER_HOSTNAME] +- server_working = ntp.ntp_server_working(self._serversStore[itr][SERVER_HOSTNAME]) +- +- #do not let dialog change epoch while we are modifying data +- self._epoch_lock.acquire() +- +- #check if we are in the same epoch as the dialog (and the serversStore) +- #and if the server wasn't changed meanwhile +- if epoch_started == self._epoch: +- actual_hostname = self._serversStore[itr][SERVER_HOSTNAME] ++ if server is self._active_server: ++ continue + +- if orig_hostname == actual_hostname: +- if server_working: +- set_store_value((self._serversStore, +- itr, SERVER_WORKING, constants.NTP_SERVER_OK)) +- else: +- set_store_value((self._serversStore, +- itr, SERVER_WORKING, constants.NTP_SERVER_NOK)) +- self._epoch_lock.release() ++ status = self._states.get_status(server) ++ row[SERVER_WORKING] = status + +- @async_action_nowait +- def _refresh_server_working(self, itr): +- """ Runs a new thread with _set_server_ok_nok(itr) as a taget. """ +- +- self._serversStore.set_value(itr, SERVER_WORKING, constants.NTP_SERVER_QUERY) +- threadMgr.add(AnacondaThread(prefix=constants.THREAD_NTP_SERVER_CHECK, +- target=self._set_server_ok_nok, +- args=(itr, self._epoch))) ++ return True + +- def _add_server(self, server, pool=False): +- """ +- Checks if a given server is a valid hostname and if yes, adds it +- to the list of servers. 
++ def on_entry_activated(self, entry, *args): ++ # Check that the input check has passed ++ if self._serverCheck.check_status != InputCheck.CHECK_OK: ++ return + +- :param server: string containing hostname ++ server = TimeSourceData() + +- """ ++ if self._poolCheckButton.get_active(): ++ server.type = TIME_SOURCE_POOL ++ else: ++ server.type = TIME_SOURCE_SERVER + +- itr = self._serversStore.append([server, pool, constants.NTP_SERVER_QUERY, True]) ++ server.hostname = entry.get_text() ++ server.options = ["iburst"] + +- #do not block UI while starting thread (may take some time) +- self._refresh_server_working(itr) ++ self._servers.append(server) ++ self._states.check_status(server) ++ self._add_row(server) + +- def on_entry_activated(self, entry, *args): +- # Check that the input check has passed +- if self._serverCheck.check_status == InputCheck.CHECK_OK: +- self._add_server(entry.get_text(), self._poolCheckButton.get_active()) +- entry.set_text("") +- self._poolCheckButton.set_active(False) ++ entry.set_text("") ++ self._poolCheckButton.set_active(False) + + def on_add_clicked(self, *args): + self._serverEntry.emit("activate") +@@ -370,16 +313,29 @@ def on_add_clicked(self, *args): + def on_use_server_toggled(self, renderer, path, *args): + itr = self._serversStore.get_iter(path) + old_value = self._serversStore[itr][SERVER_USE] +- + self._serversStore.set_value(itr, SERVER_USE, not old_value) + + def on_pool_toggled(self, renderer, path, *args): + itr = self._serversStore.get_iter(path) +- old_value = self._serversStore[itr][SERVER_POOL] ++ server = self._serversStore[itr][SERVER_OBJECT] ++ ++ if server.type == TIME_SOURCE_SERVER: ++ server.type = TIME_SOURCE_POOL ++ else: ++ server.type = TIME_SOURCE_SERVER ++ ++ self._refresh_row(itr) ++ ++ def on_server_editing_started(self, renderer, editable, path): ++ itr = self._serversStore.get_iter(path) ++ self._active_server = self._serversStore[itr][SERVER_OBJECT] + +- self._serversStore.set_value(itr, 
SERVER_POOL, not old_value) ++ def on_server_editing_canceled(self, renderer): ++ self._active_server = None + + def on_server_edited(self, renderer, path, new_text, *args): ++ self._active_server = None ++ + if not path: + return + +@@ -389,14 +345,14 @@ def on_server_edited(self, renderer, path, new_text, *args): + return + + itr = self._serversStore.get_iter(path) ++ server = self._serversStore[itr][SERVER_OBJECT] + +- if self._serversStore[itr][SERVER_HOSTNAME] == new_text: ++ if server.hostname == new_text: + return + +- self._serversStore.set_value(itr, SERVER_HOSTNAME, new_text) +- self._serversStore.set_value(itr, SERVER_WORKING, constants.NTP_SERVER_QUERY) +- +- self._refresh_server_working(itr) ++ server.hostname = new_text ++ self._states.check_status(server) ++ self._refresh_row(itr) + + + class DatetimeSpoke(FirstbootSpokeMixIn, NormalSpoke): +@@ -440,6 +396,9 @@ def __init__(self, *args): + self._timezone_module = TIMEZONE.get_proxy() + self._network_module = NETWORK.get_proxy() + ++ self._ntp_servers = [] ++ self._ntp_servers_states = NTPServerStatusCache() ++ + def initialize(self): + NormalSpoke.initialize(self) + self.initialize_start() +@@ -512,9 +471,6 @@ def initialize(self): + if not conf.system.can_set_system_clock: + self._hide_date_time_setting() + +- self._config_dialog = NTPconfigDialog(self.data, self._timezone_module) +- self._config_dialog.initialize() +- + threadMgr.add(AnacondaThread(name=constants.THREAD_DATE_TIME, + target=self._initialize)) + +@@ -634,12 +590,27 @@ def refresh(self): + + self._update_datetime() + ++ # update the ntp configuration ++ self._ntp_servers = TimeSourceData.from_structure_list( ++ self._timezone_module.TimeSources ++ ) ++ ++ if not self._ntp_servers: ++ try: ++ self._ntp_servers = ntp.get_servers_from_config() ++ except ntp.NTPconfigError: ++ log.warning("Failed to load NTP servers configuration") ++ ++ self._ntp_servers_states = NTPServerStatusCache() + has_active_network = 
self._network_module.Connected ++ + if not has_active_network: + self._show_no_network_warning() + else: + self.clear_info() +- gtk_call_once(self._config_dialog.refresh_servers_state) ++ ++ for server in self._ntp_servers: ++ self._ntp_servers_states.check_status(server) + + if conf.system.can_set_time_synchronization: + ntp_working = has_active_network and util.service_running(NTP_SERVICE) +@@ -867,13 +838,10 @@ def _set_combo_selection(self, combo, item): + return False + + def _get_combo_selection(self, combo): +- """ +- Get the selected item of the combobox. ++ """Get the selected item of the combobox. + + :return: selected item or None +- + """ +- + model = combo.get_model() + itr = combo.get_active_iter() + if not itr or not model: +@@ -946,9 +914,7 @@ def on_updown_ampm_clicked(self, *args): + def on_region_changed(self, combo, *args): + """ + :see: on_city_changed +- + """ +- + region = self._get_active_region() + + if not region or region == self._old_region: +@@ -974,9 +940,7 @@ def on_city_changed(self, combo, *args): + hit etc.; 'London' chosen in the expanded combobox => update timezone + map and do all necessary actions). Fortunately when entry is being + edited, self._get_active_city returns None. 
+- + """ +- + timezone = None + + region = self._get_active_region() +@@ -1107,8 +1071,17 @@ def _set_date_time_setting_sensitive(self, sensitive): + footer_alignment = self.builder.get_object("footerAlignment") + footer_alignment.set_sensitive(sensitive) + ++ def _get_working_server(self): ++ """Get a working NTP server.""" ++ for server in self._ntp_servers: ++ status = self._ntp_servers_states.get_status(server) ++ if status == constants.NTP_SERVER_OK: ++ return server ++ ++ return None ++ + def _show_no_network_warning(self): +- self.set_warning(_("You need to set up networking first if you "\ ++ self.set_warning(_("You need to set up networking first if you " + "want to use NTP")) + + def _show_no_ntp_server_warning(self): +@@ -1127,13 +1100,13 @@ def on_ntp_switched(self, switch, *args): + return + else: + self.clear_info() ++ working_server = self._get_working_server() + +- working_server = self._config_dialog.working_server + if working_server is None: + self._show_no_ntp_server_warning() + else: +- #we need a one-time sync here, because chronyd would not change +- #the time as drastically as we need ++ # We need a one-time sync here, because chronyd would ++ # not change the time as drastically as we need. 
+ ntp.one_time_sync_async(working_server) + + ret = util.start_service(NTP_SERVICE) +@@ -1161,16 +1134,24 @@ def on_ntp_switched(self, switch, *args): + self.clear_info() + + def on_ntp_config_clicked(self, *args): +- self._config_dialog.refresh() ++ servers = copy.deepcopy(self._ntp_servers) ++ states = self._ntp_servers_states + +- with self.main_window.enlightbox(self._config_dialog.window): +- response = self._config_dialog.run() ++ dialog = NTPConfigDialog(self.data, servers, states) ++ dialog.refresh() ++ ++ with self.main_window.enlightbox(dialog.window): ++ response = dialog.run() + + if response == 1: +- pools, servers = self._config_dialog.pools_servers +- self._timezone_module.SetNTPServers(ntp.pools_servers_to_internal(pools, servers)) ++ self._timezone_module.SetTimeSources( ++ TimeSourceData.to_structure_list(servers) ++ ) ++ ++ self._ntp_servers = servers ++ working_server = self._get_working_server() + +- if self._config_dialog.working_server is None: ++ if working_server is None: + self._show_no_ntp_server_warning() + else: + self.clear_info() +-- +2.23.0 diff --git a/ntp-servers-improve-010-Add-support-for-the-timesource-kickstart-command.patch b/ntp-servers-improve-010-Add-support-for-the-timesource-kickstart-command.patch new file mode 100644 index 0000000..b5fb862 --- /dev/null +++ b/ntp-servers-improve-010-Add-support-for-the-timesource-kickstart-command.patch @@ -0,0 +1,284 @@ +From 61fe3f12215bceebde71c35dc7ef14dbc17bb4d7 Mon Sep 17 00:00:00 2001 +From: Vendula Poncova +Date: Fri, 3 Jul 2020 18:29:33 +0200 +Subject: [PATCH] Add support for the timesource kickstart command + +The Timezone module should handle the timesource kickstart command. 
+--- + anaconda.spec.in | 2 +- + pyanaconda/core/kickstart/commands.py | 4 +- + pyanaconda/kickstart.py | 1 + + pyanaconda/modules/timezone/kickstart.py | 5 ++ + pyanaconda/modules/timezone/timezone.py | 69 +++++++++++++--- + .../pyanaconda_tests/module_timezone_test.py | 79 ++++++++++++++++++- + 6 files changed, 141 insertions(+), 19 deletions(-) + +diff --git a/anaconda.spec.in b/anaconda.spec.in +index 83adeb9089..c76181d363 100644 +--- a/anaconda.spec.in ++++ b/anaconda.spec.in +@@ -33,7 +33,7 @@ Source0: %{name}-%{version}.tar.bz2 + %define libxklavierver 5.4 + %define mehver 0.23-1 + %define nmver 1.0 +-%define pykickstartver 3.25-1 ++%define pykickstartver 3.27-1 + %define pypartedver 2.5-2 + %define rpmver 4.10.0 + %define simplelinever 1.1-1 +diff --git a/pyanaconda/core/kickstart/commands.py b/pyanaconda/core/kickstart/commands.py +index 590027dd33..3c3eed03e2 100644 +--- a/pyanaconda/core/kickstart/commands.py ++++ b/pyanaconda/core/kickstart/commands.py +@@ -76,7 +76,8 @@ + from pykickstart.commands.sshpw import F24_SshPw as SshPw + from pykickstart.commands.sshkey import F22_SshKey as SshKey + from pykickstart.commands.syspurpose import RHEL8_Syspurpose as Syspurpose +-from pykickstart.commands.timezone import F32_Timezone as Timezone ++from pykickstart.commands.timezone import F33_Timezone as Timezone ++from pykickstart.commands.timesource import F33_Timesource as Timesource + from pykickstart.commands.updates import F7_Updates as Updates + from pykickstart.commands.url import F30_Url as Url + from pykickstart.commands.user import F24_User as User +@@ -107,6 +108,7 @@ + from pykickstart.commands.snapshot import F26_SnapshotData as SnapshotData + from pykickstart.commands.sshpw import F24_SshPwData as SshPwData + from pykickstart.commands.sshkey import F22_SshKeyData as SshKeyData ++from pykickstart.commands.timesource import F33_TimesourceData as TimesourceData + from pykickstart.commands.user import F19_UserData as UserData + from 
pykickstart.commands.volgroup import F21_VolGroupData as VolGroupData + from pykickstart.commands.zfcp import F14_ZFCPData as ZFCPData +diff --git a/pyanaconda/kickstart.py b/pyanaconda/kickstart.py +index d2fcaab44d..946da8bc95 100644 +--- a/pyanaconda/kickstart.py ++++ b/pyanaconda/kickstart.py +@@ -372,6 +372,7 @@ def finalize(self): + "sshkey" : UselessCommand, + "skipx": UselessCommand, + "snapshot": UselessCommand, ++ "timesource": UselessCommand, + "timezone": UselessCommand, + "url": UselessCommand, + "user": UselessCommand, +diff --git a/pyanaconda/modules/timezone/kickstart.py b/pyanaconda/modules/timezone/kickstart.py +index 7115322677..b94e4129c3 100644 +--- a/pyanaconda/modules/timezone/kickstart.py ++++ b/pyanaconda/modules/timezone/kickstart.py +@@ -24,4 +24,9 @@ class TimezoneKickstartSpecification(KickstartSpecification): + + commands = { + "timezone": COMMANDS.Timezone, ++ "timesource": COMMANDS.Timesource, ++ } ++ ++ commands_data = { ++ "TimesourceData": COMMANDS.TimesourceData, + } +diff --git a/pyanaconda/modules/timezone/timezone.py b/pyanaconda/modules/timezone/timezone.py +index ff89d1ea77..b7fd5b6430 100644 +--- a/pyanaconda/modules/timezone/timezone.py ++++ b/pyanaconda/modules/timezone/timezone.py +@@ -17,8 +17,11 @@ + # License and may only be used or replicated with the express permission of + # Red Hat, Inc. 
+ # ++from pykickstart.errors import KickstartParseError ++ ++from pyanaconda.core.i18n import _ + from pyanaconda.core.configuration.anaconda import conf +-from pyanaconda.core.constants import TIME_SOURCE_SERVER ++from pyanaconda.core.constants import TIME_SOURCE_SERVER, TIME_SOURCE_POOL + from pyanaconda.core.dbus import DBus + from pyanaconda.core.signal import Signal + from pyanaconda.modules.common.base import KickstartService +@@ -73,29 +76,69 @@ def process_kickstart(self, data): + self.set_is_utc(data.timezone.isUtc) + self.set_ntp_enabled(not data.timezone.nontp) + +- servers = [] ++ sources = [] + + for hostname in data.timezone.ntpservers: +- server = TimeSourceData() +- server.type = TIME_SOURCE_SERVER +- server.hostname = hostname +- server.options = ["iburst"] +- servers.append(server) +- +- self.set_time_sources(servers) ++ source = TimeSourceData() ++ source.type = TIME_SOURCE_SERVER ++ source.hostname = hostname ++ source.options = ["iburst"] ++ sources.append(source) ++ ++ for source_data in data.timesource.dataList(): ++ if source_data.ntp_disable: ++ self.set_ntp_enabled(False) ++ continue ++ ++ source = TimeSourceData() ++ source.options = ["iburst"] ++ ++ if source_data.ntp_server: ++ source.type = TIME_SOURCE_SERVER ++ source.hostname = source_data.ntp_server ++ elif source_data.ntp_pool: ++ source.type = TIME_SOURCE_POOL ++ source.hostname = source_data.ntp_pool ++ else: ++ KickstartParseError( ++ _("Invalid time source."), ++ lineno=source_data.lineno ++ ) ++ ++ if source_data.nts: ++ source.options.append("nts") ++ ++ sources.append(source) ++ ++ self.set_time_sources(sources) + + def setup_kickstart(self, data): + """Set up the kickstart data.""" + data.timezone.timezone = self.timezone + data.timezone.isUtc = self.is_utc +- data.timezone.nontp = not self.ntp_enabled ++ source_data_list = data.timesource.dataList() + + if not self.ntp_enabled: ++ source_data = data.TimesourceData() ++ source_data.ntp_disable = True ++ 
source_data_list.append(source_data) + return + +- data.timezone.ntpservers = [ +- server.hostname for server in self.time_sources +- ] ++ for source in self.time_sources: ++ source_data = data.TimesourceData() ++ ++ if source.type == TIME_SOURCE_SERVER: ++ source_data.ntp_server = source.hostname ++ elif source.type == TIME_SOURCE_POOL: ++ source_data.ntp_pool = source.hostname ++ else: ++ log.warning("Skipping %s.", source) ++ continue ++ ++ if "nts" in source.options: ++ source_data.nts = True ++ ++ source_data_list.append(source_data) + + @property + def timezone(self): +diff --git a/tests/nosetests/pyanaconda_tests/module_timezone_test.py b/tests/nosetests/pyanaconda_tests/module_timezone_test.py +index bb751d6f4b..dab857e034 100644 +--- a/tests/nosetests/pyanaconda_tests/module_timezone_test.py ++++ b/tests/nosetests/pyanaconda_tests/module_timezone_test.py +@@ -65,7 +65,7 @@ def _check_dbus_property(self, *args, **kwargs): + + def kickstart_properties_test(self): + """Test kickstart properties.""" +- self.assertEqual(self.timezone_interface.KickstartCommands, ["timezone"]) ++ self.assertEqual(self.timezone_interface.KickstartCommands, ["timezone", "timesource"]) + self.assertEqual(self.timezone_interface.KickstartSections, []) + self.assertEqual(self.timezone_interface.KickstartAddons, []) + self.callback.assert_not_called() +@@ -143,19 +143,90 @@ def kickstart2_test(self): + timezone --utc --nontp Europe/Prague + """ + ks_out = """ ++ timesource --ntp-disable + # System timezone +- timezone Europe/Prague --utc --nontp ++ timezone Europe/Prague --utc + """ + self._test_kickstart(ks_in, ks_out) + + def kickstart3_test(self): +- """Test the timezone command with ntp servers..""" ++ """Test the timezone command with ntp servers.""" + ks_in = """ + timezone --ntpservers ntp.cesnet.cz Europe/Prague + """ + ks_out = """ ++ timesource --ntp-server=ntp.cesnet.cz + # System timezone +- timezone Europe/Prague --ntpservers=ntp.cesnet.cz ++ timezone Europe/Prague ++ """ 
++ self._test_kickstart(ks_in, ks_out) ++ ++ def kickstart_timesource_ntp_disabled_test(self): ++ """Test the timesource command with ntp disabled.""" ++ ks_in = """ ++ timesource --ntp-disable ++ """ ++ ks_out = """ ++ timesource --ntp-disable ++ """ ++ self._test_kickstart(ks_in, ks_out) ++ ++ def kickstart_timesource_ntp_server_test(self): ++ """Test the timesource command with ntp servers.""" ++ ks_in = """ ++ timesource --ntp-server ntp.cesnet.cz ++ """ ++ ks_out = """ ++ timesource --ntp-server=ntp.cesnet.cz ++ """ ++ self._test_kickstart(ks_in, ks_out) ++ ++ def kickstart_timesource_ntp_pool_test(self): ++ """Test the timesource command with ntp pools.""" ++ ks_in = """ ++ timesource --ntp-pool ntp.cesnet.cz ++ """ ++ ks_out = """ ++ timesource --ntp-pool=ntp.cesnet.cz ++ """ ++ self._test_kickstart(ks_in, ks_out) ++ ++ def kickstart_timesource_nts_test(self): ++ """Test the timesource command with the nts option.""" ++ ks_in = """ ++ timesource --ntp-pool ntp.cesnet.cz --nts ++ """ ++ ks_out = """ ++ timesource --ntp-pool=ntp.cesnet.cz --nts ++ """ ++ self._test_kickstart(ks_in, ks_out) ++ ++ def kickstart_timesource_all_test(self): ++ """Test the timesource commands.""" ++ ks_in = """ ++ timesource --ntp-server ntp.cesnet.cz ++ timesource --ntp-pool 0.fedora.pool.ntp.org ++ """ ++ ks_out = """ ++ timesource --ntp-server=ntp.cesnet.cz ++ timesource --ntp-pool=0.fedora.pool.ntp.org ++ """ ++ self._test_kickstart(ks_in, ks_out) ++ ++ def kickstart_timezone_timesource_test(self): ++ """Test the combination of timezone and timesource commands.""" ++ ks_in = """ ++ timezone --ntpservers ntp.cesnet.cz,0.fedora.pool.ntp.org Europe/Prague ++ timesource --ntp-server ntp.cesnet.cz --nts ++ timesource --ntp-pool 0.fedora.pool.ntp.org ++ """ ++ ks_out = """ ++ timesource --ntp-server=ntp.cesnet.cz ++ timesource --ntp-server=0.fedora.pool.ntp.org ++ timesource --ntp-server=ntp.cesnet.cz --nts ++ timesource --ntp-pool=0.fedora.pool.ntp.org ++ # System timezone ++ 
timezone Europe/Prague + """ + self._test_kickstart(ks_in, ks_out) + +-- +2.23.0 diff --git a/openeuler.conf b/openeuler.conf new file mode 100644 index 0000000..cf1e989 --- /dev/null +++ b/openeuler.conf @@ -0,0 +1,16 @@ +# Anaconda configuration file for {os_name} + +[Product] +product_name = {os_name} + +[Bootloader] +efi_dir = openEuler + +[Payload] +enable_closest_mirror = True + +[User Interface] +blivet_gui_supported = False + +[License] +eula = "/usr/share/{os_name}-release/EULA" diff --git a/remove-vender-issue-in-netdev.patch b/remove-vender-issue-in-netdev.patch new file mode 100644 index 0000000..5582da0 --- /dev/null +++ b/remove-vender-issue-in-netdev.patch @@ -0,0 +1,28 @@ +From b896b694238389a85539f60cab6ee41ab04c4f29 Mon Sep 17 00:00:00 2001 +From: t_feng +Date: Fri, 19 Jun 2020 10:25:20 +0800 +Subject: [PATCH] remove vender issue in netdev + +--- + pyanaconda/ui/gui/spokes/network.py | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/pyanaconda/ui/gui/spokes/network.py b/pyanaconda/ui/gui/spokes/network.py +index 1318e17..e906f9e 100644 +--- a/pyanaconda/ui/gui/spokes/network.py ++++ b/pyanaconda/ui/gui/spokes/network.py +@@ -782,9 +782,8 @@ class NetworkControlBox(GObject.GObject): + unplugged) + + if device: +- title += '\n%s %s' % \ +- (escape_markup(device.get_vendor() or ""), +- escape_markup(device.get_product() or "")) ++ title += '\n%s' % \ ++ (escape_markup(device.get_product() or "")) + return title + + def refresh_ui(self, state=None): +-- +2.23.0 + diff --git a/revert-Set-default-entry-to-the-BLS-id-instead-of-th.patch b/revert-Set-default-entry-to-the-BLS-id-instead-of-th.patch new file mode 100644 index 0000000..0d43f29 --- /dev/null +++ b/revert-Set-default-entry-to-the-BLS-id-instead-of-th.patch @@ -0,0 +1,46 @@ +From e45322c3ea34f5e0941ca64a2c01ed7362b7a04a Mon Sep 17 00:00:00 2001 +From: Javier Martinez Canillas +Date: Fri, 5 Aug 2022 16:51:38 +0800 +Subject: [PATCH] revert "Set default entry to the BLS 
id instead of the entry index" + +revert the patch of "Set default entry to the BLS id instead of the entry index" +Reference:https://github.com/rhinstaller/anaconda/commit/a252e4424bd51d6236d3b7b8e3840d8ca0af90a2 +Conflict:https://github.com/rhinstaller/anaconda/commit/a252e4424bd51d6236d3b7b8e3840d8ca0af90a2 +--- + .../modules/storage/bootloader/grub2.py | 20 +++++++++---------- + 1 file changed, 10 insertions(+), 10 deletions(-) + +diff --git a/pyanaconda/modules/storage/bootloader/grub2.py b/pyanaconda/modules/storage/bootloader/grub2.py +index a70ba7a..b0f3b0a 100644 +--- a/pyanaconda/modules/storage/bootloader/grub2.py ++++ b/pyanaconda/modules/storage/bootloader/grub2.py +@@ -351,16 +351,16 @@ class GRUB2(BootLoader): + + # make sure the default entry is the OS we are installing + if self.default is not None: +- machine_id_path = conf.target.system_root + "/etc/machine-id" +- if not os.access(machine_id_path, os.R_OK): +- log.error("failed to read machine-id, default entry not set") +- return +- +- with open(machine_id_path, "r") as fd: +- machine_id = fd.readline().strip() +- +- default_entry = "%s-%s" % (machine_id, self.default.version) +- rc = util.execInSysroot("grub2-set-default", [default_entry]) ++ # find the index of the default image ++ try: ++ default_index = self.images.index(self.default) ++ except ValueError: ++ # pylint: disable=no-member ++ log.warning("Failed to find default image (%s), defaulting to 0", ++ self.default.label) ++ default_index = 0 ++ ++ rc = util.execInSysroot("grub2-set-default", [str(default_index)]) + if rc: + log.error("failed to set default menu entry to %s", productName) + +-- +2.27.0 + diff --git a/support-use-sm3-crypt-user-password.patch b/support-use-sm3-crypt-user-password.patch new file mode 100644 index 0000000..fa57f10 --- /dev/null +++ b/support-use-sm3-crypt-user-password.patch @@ -0,0 +1,235 @@ +From b311b645f9447f7e765b0e418d3f37c32e2702e1 Mon Sep 17 00:00:00 2001 +From: liuxin +Date: Fri, 29 Oct 2021 
16:01:57 +0800 +Subject: [PATCH] support use sm3 crypt user password + +--- + po/zh_CN.po | 5 ++++ + pyanaconda/core/users.py | 7 ++++-- + pyanaconda/ui/gui/spokes/root_password.glade | 15 ++++++++++++ + pyanaconda/ui/gui/spokes/root_password.py | 15 +++++++++++- + pyanaconda/ui/gui/spokes/user.glade | 16 ++++++++++++- + pyanaconda/ui/gui/spokes/user.py | 14 ++++++++++- + .../pyanaconda_tests/crypt_password_test.py | 23 +++++++++++++++++++ + 7 files changed, 90 insertions(+), 5 deletions(-) + create mode 100644 tests/nosetests/pyanaconda_tests/crypt_password_test.py + +diff --git a/po/zh_CN.po b/po/zh_CN.po +index 7ee5511..df9e015 100644 +--- a/po/zh_CN.po ++++ b/po/zh_CN.po +@@ -7203,3 +7203,8 @@ msgstr "开始安装到硬盘" + #~ msgstr[0] "" + #~ "%(count)d 个磁盘;容量 %(size)s;空闲空间 %(free)s (包括未分区及文" + #~ "件系统内的部分)" ++ ++#: pyanaconda/ui/gui/spokes/root_password.glade:215 ++#: pyanaconda/ui/gui/spokes/user.glade:278 ++msgid "Use SM3 to encrypt the password" ++msgstr "使用SM3算法加密密码" +diff --git a/pyanaconda/core/users.py b/pyanaconda/core/users.py +index db34444..171a2d4 100644 +--- a/pyanaconda/core/users.py ++++ b/pyanaconda/core/users.py +@@ -35,7 +35,7 @@ from pyanaconda.anaconda_loggers import get_module_logger + log = get_module_logger(__name__) + + +-def crypt_password(password): ++def crypt_password(password, algo=None): + """Crypt a password. + + Process a password with appropriate salted one-way algorithm. 
+@@ -44,7 +44,10 @@ def crypt_password(password): + :returns: crypted representation of the original password + :rtype: str + """ +- cryptpw = crypt.crypt(password, crypt.METHOD_SHA512) ++ crypt_method = crypt.METHOD_SHA512 ++ if algo == "sm3": ++ crypt_method = crypt.METHOD_SM3 ++ cryptpw = crypt.crypt(password, crypt_method) + if cryptpw is None: + exn = PasswordCryptError(algo=crypt.METHOD_SHA512) + if errorHandler.cb(exn) == ERROR_RAISE: +diff --git a/pyanaconda/ui/gui/spokes/root_password.glade b/pyanaconda/ui/gui/spokes/root_password.glade +index 6892ae3..e8ff524 100644 +--- a/pyanaconda/ui/gui/spokes/root_password.glade ++++ b/pyanaconda/ui/gui/spokes/root_password.glade +@@ -210,6 +210,21 @@ + 2 + + ++ ++ ++ Use SM3 to encrypt the password ++ True ++ False ++ start ++ True ++ ++ ++ ++ False ++ True ++ 3 ++ ++ + + + +diff --git a/pyanaconda/ui/gui/spokes/root_password.py b/pyanaconda/ui/gui/spokes/root_password.py +index d609453..9e6477e 100644 +--- a/pyanaconda/ui/gui/spokes/root_password.py ++++ b/pyanaconda/ui/gui/spokes/root_password.py +@@ -61,6 +61,8 @@ class PasswordSpoke(FirstbootSpokeMixIn, NormalSpoke, GUISpokeInputCheckHandler) + self._services_module = SERVICES.get_proxy() + self._refresh_running = False + self._manually_locked = False ++ # sm3 password method ++ self._passwd_method_sm3 = False + + def initialize(self): + NormalSpoke.initialize(self) +@@ -75,6 +77,9 @@ class PasswordSpoke(FirstbootSpokeMixIn, NormalSpoke, GUISpokeInputCheckHandler) + self._root_password_ssh_login_override.set_visible(False) + self._root_password_ssh_login_override.set_no_show_all(True) + ++ # sm3 object ++ self._passwd_method_button = self.builder.get_object("passwd_sm3") ++ + # Install the password checks: + # - Has a password been specified? + # - If a password has been specified and there is data in the confirm box, do they match? 
+@@ -197,9 +202,14 @@ class PasswordSpoke(FirstbootSpokeMixIn, NormalSpoke, GUISpokeInputCheckHandler) + self._users_module.ClearRootPassword() + return + ++ if self._passwd_method_sm3 is True: ++ algo = "sm3" ++ else: ++ algo = None ++ + # we have a password - set it to kickstart data + +- self._users_module.SetCryptedRootPassword(crypt_password(pw)) ++ self._users_module.SetCryptedRootPassword(crypt_password(pw, algo)) + + # clear any placeholders + self.remove_placeholder_texts() +@@ -337,3 +347,6 @@ class PasswordSpoke(FirstbootSpokeMixIn, NormalSpoke, GUISpokeInputCheckHandler) + if not lock.get_active(): + self.password_entry.grab_focus() + self._manually_locked = True ++ ++ def on_sm3_clicked(self, button): ++ self._passwd_method_sm3 = self._passwd_method_button.get_active() +diff --git a/pyanaconda/ui/gui/spokes/user.glade b/pyanaconda/ui/gui/spokes/user.glade +index 69156b1..1cca343 100644 +--- a/pyanaconda/ui/gui/spokes/user.glade ++++ b/pyanaconda/ui/gui/spokes/user.glade +@@ -273,6 +273,20 @@ + 3 + + ++ ++ ++ Use SM3 to encrypt the password ++ True ++ False ++ start ++ True ++ ++ ++ ++ 1 ++ 8 ++ ++ + + + True +@@ -295,7 +309,7 @@ + + + 1 +- 8 ++ 9 + + + +diff --git a/pyanaconda/ui/gui/spokes/user.py b/pyanaconda/ui/gui/spokes/user.py +index 05e01f8..a5d5828 100644 +--- a/pyanaconda/ui/gui/spokes/user.py ++++ b/pyanaconda/ui/gui/spokes/user.py +@@ -256,6 +256,8 @@ class UserSpoke(FirstbootSpokeMixIn, NormalSpoke, GUISpokeInputCheckHandler): + + self._users_module = USERS.get_proxy() + self._password_is_required = True ++ # sm3 password method ++ self._passwd_method_sm3 = False + + def initialize(self): + NormalSpoke.initialize(self) +@@ -289,6 +291,9 @@ class UserSpoke(FirstbootSpokeMixIn, NormalSpoke, GUISpokeInputCheckHandler): + self._password_bar = self.builder.get_object("password_bar") + self._password_label = self.builder.get_object("password_label") + ++ # sm3 object ++ self._passwd_method_button = self.builder.get_object("passwd_sm3") ++ + # 
Install the password checks: + # - Has a password been specified? + # - If a password has been specified and there is data in the confirm box, do they match? +@@ -463,7 +468,11 @@ class UserSpoke(FirstbootSpokeMixIn, NormalSpoke, GUISpokeInputCheckHandler): + if self.password_required: + if self.password: + self.password_kickstarted = False +- self.user.password = crypt_password(self.password) ++ if self._passwd_method_sm3 is True: ++ algo = "sm3" ++ else: ++ algo = None ++ self.user.password = crypt_password(self.password, algo) + self.user.is_crypted = True + self.remove_placeholder_texts() + +@@ -688,3 +697,6 @@ class UserSpoke(FirstbootSpokeMixIn, NormalSpoke, GUISpokeInputCheckHandler): + NormalSpoke.on_back_clicked(self, button) + else: + log.info("Return to hub prevented by password checking rules.") ++ ++ def on_sm3_clicked(self, button): ++ self._passwd_method_sm3 = self._passwd_method_button.get_active() +diff --git a/tests/nosetests/pyanaconda_tests/crypt_password_test.py b/tests/nosetests/pyanaconda_tests/crypt_password_test.py +new file mode 100644 +index 0000000..0ceb16b +--- /dev/null ++++ b/tests/nosetests/pyanaconda_tests/crypt_password_test.py +@@ -0,0 +1,23 @@ ++from pyanaconda.core.users import crypt_password ++import unittest ++import crypt ++import os ++ ++@unittest.skipIf(os.geteuid() != 0, "user creation must be run as root") ++class CryptPasswordTest(unittest.TestCase): ++ def setUp(self): ++ pass ++ ++ def tearDown(self): ++ pass ++ ++ def test_crypt_password(self): ++ origin_password = "password" ++ encrypted = crypt_password(origin_password, "sm3") ++ self.assertTrue(encrypted.startswith("$sm3$")) ++ ++ encrypted = crypt_password(origin_password) ++ self.assertTrue(encrypted.startswith("$6$")) ++ ++if __name__ == '__main__': ++ unittest.main() +-- +2.27.0 + diff --git a/use-modinfo-to-check-ko-before-modprobe.patch b/use-modinfo-to-check-ko-before-modprobe.patch new file mode 100644 index 0000000..4a96d8a --- /dev/null +++ 
b/use-modinfo-to-check-ko-before-modprobe.patch @@ -0,0 +1,24 @@ +From 9f6c2300b7b4c9671275159d6355c731574120ba Mon Sep 17 00:00:00 2001 +From: ft272781150 +Date: Thu, 11 Jun 2020 10:19:49 +0800 +Subject: [PATCH] Use modinfo to check ko before modprobe + +--- + dracut/anaconda-modprobe.sh | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/dracut/anaconda-modprobe.sh b/dracut/anaconda-modprobe.sh +index 8dd9ab36e7..39c47776c4 100755 +--- a/dracut/anaconda-modprobe.sh ++++ b/dracut/anaconda-modprobe.sh +@@ -38,6 +38,10 @@ MODULE_LIST+=" raid0 raid1 raid5 raid6 raid456 raid10 linear dm-mod dm-zero \ + sha256 lrw xts " + + for m in $MODULE_LIST; do ++ if ! modinfo $m >/dev/null 2>&1 ; then ++ echo "anaconda-modprobe: Module $m not found" >&2 ++ continue ++ fi + if modprobe $m ; then + debug_msg "$m was loaded" + else