Mirror of https://github.com/BigfootACA/arch-image-builder.git

commit 067ee2d341 (parent 970a0a5cde)

    add more comments

    Signed-off-by: BigfootACA <bigfoot@classfun.cn>
@@ -6,41 +6,75 @@ log = getLogger(__name__)


def write_fstab(ctx: ArchBuilderContext):
+	"""
+	Generate fstab and write it into the rootfs
+	"""
	log.debug(
		"generate fstab:\n\t%s",
		ctx.fstab.to_mount_file("\n\t").strip()
	)
+	# WORKSPACE/TARGET/rootfs/etc/fstab
	path = os.path.join(ctx.get_rootfs(), "etc/fstab")
	with open_config(path) as f:
		ctx.fstab.write_mount_file(f)


def mount_all(ctx: ArchBuilderContext):
+	"""
+	Mount all filesystems in fstab for the build
+	"""
	path = ctx.get_mount()
	root = ctx.get_rootfs()

+	# ensure WORKSPACE/TARGET/mount exists
	if not os.path.exists(path):
		os.mkdir(path, mode=0o0755)

+	# the first item must be ROOT (sorted by ctx.fstab.resort())
	if ctx.fstab[0].target != "/":
		raise RuntimeError("no root to mount")

	for mnt in ctx.fstab:
+		# do not change the original item
		m = mnt.clone()

+		# skip virtual source devices
		if m.source == "none": continue

+		# we have to mount the virtual device instead
+		# original: /dev/mmcblk0p1, PARTLABEL=linux
+		# we need: /dev/loop0, /dev/loop1
+		# see builder.disk.filesystem.build.FileSystemBuilder.proc_fstab()
		if m.source not in ctx.fsmap:
			raise RuntimeError(f"source {m.source} cannot map to host")
		m.source = ctx.fsmap[m.source]
-		if m.target == "/": in_mnt, in_root = path, root
+		if m.target == "/":
+			# ROOT needs no path resolution
+			in_mnt, in_root = path, root
		elif m.target.startswith("/"):
+			# resolve against ROOT and MOUNT
+			# m.target: /boot
+			# in_mnt:   WORKSPACE/TARGET/mount/boot
+			# in_root:  WORKSPACE/TARGET/rootfs/boot
			folder = m.target[1:]
			in_mnt = os.path.join(path, folder)
			in_root = os.path.join(root, folder)
-		elif m.fstype == "swap" or m.target == "none": continue
+		elif m.fstype == "swap" or m.target == "none":
+			# skip mounting virtual filesystems and swap
+			continue
		else: raise RuntimeError(f"target {m.target} cannot map to host")

		if in_mnt:
+			# ensure the mount target exists
			m.target = in_mnt
			if not os.path.exists(in_mnt):
				os.makedirs(in_mnt, mode=0o0755)
		if in_root and not os.path.exists(in_root):
+			# ensure the folder also exists in the rootfs
			os.makedirs(in_root, mode=0o0755)

+		# invoke the real mount
		m.mount()
		ctx.mounted.insert(0, m)

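The remapping above is easiest to see with concrete values. A minimal sketch, assuming a made-up fsmap entry and workspace layout (none of these values come from this commit):

# Illustration only: how one fstab entry is remapped for the build.
fsmap = {"PARTLABEL=linux": "/dev/loop0p2"}   # image partition -> host loop device
entry = {"source": "PARTLABEL=linux", "target": "/boot", "fstype": "ext4"}

source = fsmap[entry["source"]]                         # /dev/loop0p2, mountable on the host
in_mnt = "WORKSPACE/TARGET/mount" + entry["target"]     # mounted here during the build
in_root = "WORKSPACE/TARGET/rootfs" + entry["target"]   # mirrored directory inside the rootfs
print(source, in_mnt, in_root)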
@@ -27,6 +27,9 @@ def get_prop(
	path: bool = False,
	multi: bool = False,
) -> str | None:
+	"""
+	Get a config value for grub
+	"""
	value = ctx.get(f"kernel.{name}", None)
	if name in cfg: value = cfg[name]
	if value is None: return None
@@ -34,6 +37,7 @@ def get_prop(
		value = [value]
	if len(value) == 0: return None
	if path:
+		# must start with /
		for i in range(len(value)):
			if not value[i].startswith("/"):
				value[i] = "/" + value[i]
@@ -43,6 +47,9 @@ def get_prop(


def fstype_to_mod(name: str) -> str:
+	"""
+	Map a filesystem type to a GRUB2 module name
+	"""
	match name:
		case "ext3": return "ext2"
		case "ext4": return "ext2"
@@ -51,38 +58,72 @@ def fstype_to_mod(name: str) -> str:
		case "fat16": return "fat"
		case "fat32": return "fat"
		case "msdos": return "fat"
+		# TODO: add more filesystems
		case _: return name


def gen_menuentry(ctx: ArchBuilderContext, cfg: dict) -> str:
+	"""
+	Generate a menuentry config for grub
+	"""
	ret = ""

+	# menuentry name (defaults to Linux)
	name = cfg["name"] if "name" in cfg else "Linux"

+	# kernel image path
	kernel = get_prop(ctx, "kernel", cfg, True)

+	# initramfs image path (multiple allowed)
	initramfs = get_prop(ctx, "initramfs", cfg, True, True)

+	# device tree blob path (multiple allowed)
	devicetree = get_prop(ctx, "devicetree", cfg, True, True)

+	# kernel command line
	cmdline = get_prop(ctx, "cmdline", cfg, False, True)

+	# the folder where these files are placed
	path = get_prop(ctx, "path", cfg, False, False)

	if kernel is None: raise ArchBuilderConfigError("no kernel for grub")
	if cmdline is None: cmdline = ""
	ret += f"menuentry '{name}' {{\n"

+	# if path is set: load the filesystem module and search to set root
	if path:
+		# find the mount point in fstab
		fs = ctx.fstab.find_target(path)
		if fs is None or len(fs) == 0 or fs[0] is None:
			raise ArchBuilderConfigError(f"mountpoint {path} not found")
		dev = fs[0].source

+		# map to the host-side virtual device
		if dev in ctx.fsmap: dev = ctx.fsmap[dev]

+		# get the filesystem UUID to search for
		uuid = blkid.get_tag_value(None, "UUID", dev)
		if uuid is None: raise RuntimeError(f"cannot detect uuid for {path}")

+		# load the filesystem module and search for the target
		ret += "\tinsmod %s\n" % fstype_to_mod(fs[0].fstype)
		ret += f"\tsearch --no-floppy --fs-uuid --set=root {uuid}\n"

+	# add the device tree blob field
	if devicetree:
		ret += "\techo 'Loading Device Tree...'\n"
		ret += f"\tdevicetree {devicetree}\n"

+	# add the kernel path field and kernel command line
	ret += "\techo 'Loading Kernel...'\n"
	ret += f"\tlinux {kernel} {cmdline}\n"

+	# add the initramfs field
	if initramfs:
		ret += "\techo 'Loading Initramfs...'\n"
		ret += f"\tinitrd {initramfs}\n"

+	# boot into Linux (no explicit 'boot' command; menuentry implies it)
	ret += "\techo 'Booting...'\n"
	ret += f"}}\n"
	return ret
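For reference, an entry named "Arch Linux" with a kernel, one initramfs, a command line and path pointing at an ext4 /boot would make gen_menuentry emit text along these lines (the UUID and file names here are invented, shown as a Python string only for illustration):

expected = (
	"menuentry 'Arch Linux' {\n"
	"\tinsmod ext2\n"
	"\tsearch --no-floppy --fs-uuid --set=root 1234-ABCD\n"
	"\techo 'Loading Kernel...'\n"
	"\tlinux /vmlinuz-linux root=/dev/sda2 rw\n"
	"\techo 'Loading Initramfs...'\n"
	"\tinitrd /initramfs-linux.img\n"
	"\techo 'Booting...'\n"
	"}\n"
)
print(expected)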
@@ -90,18 +131,26 @@ def gen_menuentry(ctx: ArchBuilderContext, cfg: dict) -> str:


def gen_basic(ctx: ArchBuilderContext) -> str:
	ret = ""

+	# load generic modules
	ret += "insmod part_gpt\n"
	ret += "insmod part_msdos\n"
	ret += "insmod all_video\n"

+	# set up the console and serial port
	ret += "terminal_input console\n"
	ret += "terminal_output console\n"
	ret += "if serial --unit=0 --speed=115200; then\n"
	ret += "\tterminal_input --append console\n"
	ret += "\tterminal_output --append console\n"
	ret += "fi\n"

+	# set the grub timeout in seconds
	ret += "set timeout_style=menu\n"
	timeout = ctx.get("bootloader.timeout", 5)
	ret += f"set timeout={timeout}\n"

+	# find the default entry
	default = 0
	items = ctx.get("bootloader.items", [])
	for idx in range(len(items)):
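Assembled, the preamble that gen_basic builds comes out roughly as follows with the default 5-second timeout (shown here as a Python string purely for illustration):

preamble = (
	"insmod part_gpt\n"
	"insmod part_msdos\n"
	"insmod all_video\n"
	"terminal_input console\n"
	"terminal_output console\n"
	"if serial --unit=0 --speed=115200; then\n"
	"\tterminal_input --append console\n"
	"\tterminal_output --append console\n"
	"fi\n"
	"set timeout_style=menu\n"
	"set timeout=5\n"
)

A "set default=<index>" line follows, picked from whichever bootloader.items entry is marked default, as the next hunk shows.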
@@ -109,10 +158,14 @@ def gen_basic(ctx: ArchBuilderContext) -> str:
		if "default" in item and item["default"]:
			default = idx
	ret += f"set default={default}\n"

	return ret


def mkconfig(ctx: ArchBuilderContext) -> str:
+	"""
+	Generate a full grub config for the current rootfs
+	"""
	ret = ""
	ret += gen_basic(ctx)
	for item in ctx.get("bootloader.items", []):
@@ -121,15 +174,23 @@ def mkconfig(ctx: ArchBuilderContext) -> str:


def proc_targets(ctx: ArchBuilderContext, install: str):
+	"""
+	Copy grub target folders directly
+	"""
	copies = [".mod", ".lst"]
	folder = os.path.join(ctx.get_rootfs(), "usr/lib/grub")
	for target in ctx.get("grub.targets", []):
+		# target name format: i386-pc, arm64-efi
		if "/" in target: raise ArchBuilderConfigError(f"bad target {target}")
		base = os.path.join(folder, target)

+		# at least we need linux.mod
		if not os.path.exists(os.path.join(base, "linux.mod")):
			raise ArchBuilderConfigError(f"target {target} not found")
		dest = os.path.join(install, target)
		os.makedirs(dest, mode=0o0755, exist_ok=True)

+		# copy the grub target
		for file in os.listdir(base):
			if not any((file.endswith(name) for name in copies)):
				continue
@@ -141,6 +202,9 @@ def proc_targets(ctx: ArchBuilderContext, install: str):


def proc_config(ctx: ArchBuilderContext, install: str):
+	"""
+	Generate a full grub config for the current rootfs and write it to the install folder
+	"""
	content = mkconfig(ctx)
	cfg = os.path.join(install, "grub.cfg")
	with open(cfg, "w") as f:
@@ -149,6 +213,9 @@ def proc_config(ctx: ArchBuilderContext, install: str):


def efi_arch_name(target: str) -> str:
+	"""
+	Map a grub target name to a UEFI arch name
+	"""
	match target:
		case "arm64-efi": return "aa64"
		case "x86_64-efi": return "x64"
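Together with efi_boot_name in the next hunk, this mapping determines the removable-media boot file name, for example:

assert efi_arch_name("x86_64-efi") == "x64"
assert efi_boot_name("x86_64-efi") == "bootx64.efi"
assert efi_boot_name("arm64-efi") == "bootaa64.efi"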
@@ -161,50 +228,97 @@ def efi_arch_name(target: str) -> str:


def efi_boot_name(target: str) -> str:
+	"""
+	Map a grub target name to the UEFI default boot file name
+	"""
	name = efi_arch_name(target)
	return f"boot{name}.efi"


def proc_mkimage_efi(ctx: ArchBuilderContext, target: str):
+	"""
+	Create the GRUB EFI image for boot
+	"""
	cmds = ["grub-mkimage"]
	root = ctx.get_rootfs()

+	# allowed ESP folders
	efi_folders = ["/boot", "/boot/efi", "/efi", "/esp"]

+	# grub2 source folder in rootfs (WORKSPACE/TARGET/rootfs/usr/lib/grub/x86_64-efi)
	base = os.path.join(root, "usr/lib/grub", target)

+	# install path in rootfs (/boot/grub)
	install = ctx.get("grub.path", "/boot/grub")

+	# why is this function called proc_mkimage_efi?
	if not target.endswith("-efi"):
		raise RuntimeError("mkimage efi only for *-efi")
-	esp: MountPoint | None = None
-	grub: MountPoint | None = None
-	fdir = install + "/"
+	# must end with /
+	fdir = os.path.realpath(install) + "/"

+	# find the required mount points
+	esp: MountPoint | None = None   # UEFI system partition
+	grub: MountPoint | None = None  # GRUB install folder
	for mnt in ctx.fstab:
+		# the ESP must be FAT
		if fstype_to_mod(mnt.fstype) == "fat":
			if mnt.target in efi_folders:
				esp = mnt

+		# add a trailing slash to avoid matching a shared prefix (e.g. /boot vs /bootfs)
		tdir = mnt.target
		if not tdir.endswith("/"): tdir += "/"

+		# grub install folder
		if fdir.startswith(tdir):
+			# pick the deepest mount point
+			# to avoid choosing / when grub is installed in /boot
			if (not grub) or mnt.level >= grub.level:
				grub = mnt
-	if esp is None: raise RuntimeError("efi partiton not found")
+	if esp is None: raise RuntimeError("efi partition not found")
	if grub is None: raise RuntimeError("grub install folder not found")
-	esp_dest = esp.target
-	if esp_dest.startswith("/"): esp_dest = esp_dest[1:]
-	if not install.startswith("/"): install = "/" + install
+	# grub install target folder (/boot/grub)
+	if not install.startswith("/"):
+		install = "/" + install

+	# must be inside the grub install folder
	if not install.startswith(grub.target):
		raise RuntimeError("grub install prefix not found")

+	# get the grub install path inside the target partition
+	# Mount    GRUB install    Prefix
+	# /boot    /boot/grub      /grub
+	# /        /boot/grub      /boot/grub
	prefix = install[len(grub.target):]
	if not prefix.startswith("/"): prefix = "/" + prefix

+	# get the UUID of the filesystem grub is installed on
	device = (ctx.fsmap[grub.source] if grub.source in ctx.fsmap else grub.source)
	uuid = blkid.get_tag_value(None, "UUID", device)
	if not uuid: raise RuntimeError(
		"failed to detect uuid for grub install path"
	)

+	# esp install target folder (boot/efi)
+	esp_dest = esp.target
+	if esp_dest.startswith("/"):
+		esp_dest = esp_dest[1:]

+	# esp install target folder in rootfs (WORKSPACE/TARGET/rootfs/boot/efi)
	efi_folder = os.path.join(root, esp_dest)

+	# grub install target folder in rootfs (WORKSPACE/TARGET/rootfs/boot/grub)
	grub_folder = os.path.join(root, install[1:])

	cmds.append(f"--format={target}")
	cmds.append(f"--directory={base}")
	cmds.append(f"--prefix={prefix}")
	cmds.append("--compression=xz")

+	# put the builtin config into the grub install folder
	builtin = os.path.join(grub_folder, "grub.builtin.cfg")
	with open(builtin, "w") as f:
		f.write(f"search --no-floppy --fs-uuid --set=root {uuid}\n")
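A concrete illustration of the prefix logic, using assumed mount points rather than a real config:

# Assume /boot is its own partition and grub.path is the default /boot/grub.
grub_target = "/boot"              # mount point that contains the install folder
install = "/boot/grub"
prefix = install[len(grub_target):]  # -> "/grub", embedded into the EFI image
# grub-mkimage then gets invoked roughly like:
#   grub-mkimage --format=x86_64-efi --directory=.../usr/lib/grub/x86_64-efi
#     --prefix=/grub --compression=xz --config=.../grub.builtin.cfg
#     --output=.../efi/boot/bootx64.efi <modules...>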
@@ -213,62 +327,98 @@ def proc_mkimage_efi(ctx: ArchBuilderContext, target: str):
		f.write("echo \"Failed to switch into normal mode\"\n")
		f.write("sleep 5\n")
	cmds.append(f"--config={builtin}")

+	# efi boot image install folder (WORKSPACE/TARGET/rootfs/boot/efi/efi/boot)
	efi = os.path.join(efi_folder, "efi/boot")
	os.makedirs(efi, mode=0o0755, exist_ok=True)

+	# efi boot image (WORKSPACE/TARGET/rootfs/boot/efi/efi/boot/bootx64.efi)
	out = os.path.join(efi, efi_boot_name(target))
	cmds.append(f"--output={out}")
	if os.path.exists(out): os.remove(out)

	cmds.extend(modules)

+	# run grub-mkimage
	ret = ctx.run_external(cmds)
	if ret != 0: raise OSError("grub-mkimage failed")
	log.info(f"generated grub {target} efi image {out}")


def proc_bootsec(ctx: ArchBuilderContext, target: str):
+	"""
+	Install the boot sector for x86 PCs via grub-install
+	"""
	mods = []
	cmds = ["grub-install"]
	if target != "i386-pc":
		raise RuntimeError("bootsec only for i386-pc")
	mount = ctx.get_mount()
	root = ctx.get_rootfs()

+	# get the grub install base folder (boot)
	install: str = ctx.get("grub.path", "/boot/grub")
	if install.startswith("/"): install = install[1:]
+	if install.endswith("/grub"): install = install[:-5]

	grub = os.path.join(root, "usr/lib/grub", target)
-	if install.endswith("/grub"): install = install[0:-5]
	cmds.append(f"--target={target}")
	cmds.append(f"--directory={grub}")

	mods.append("part_msdos")
	mods.append("part_gpt")
-	rootfs = ctx.fstab.find_target("/")
+	# grub install base folder in mount (WORKSPACE/TARGET/mount/boot)
	mnt_install = os.path.join(mount, install)
	cmds.append(f"--boot-directory={mnt_install}")

+	# find the mount point of the rootfs
+	rootfs = ctx.fstab.find_target("/")
	if rootfs is None or len(rootfs) <= 0 or rootfs[0] is None:
		raise RuntimeError("rootfs mount point not found")
	rootfs = rootfs[0]

+	# add the filesystem module for the rootfs
	mods.append(fstype_to_mod(rootfs.fstype))
	if len(mods) > 0:
		cmds.append("--modules=" + (" ".join(mods)))

+	# detect the grub boot sector install device
	device = ctx.get("grub.device", None)
	if device is None:
		source = rootfs.source
		if source in ctx.fsmap:
			source = ctx.fsmap[source]

+		# loop setup by builder.disk.image.ImageBuilder
		if not source.startswith("/dev/loop"):
			raise RuntimeError("no device to detect grub install")

+		# loop offset partition setup by builder.disk.layout.build.DiskLayoutBuilder
		if loop_get_offset(source) <= 0:
			raise RuntimeError("no loop part to detect grub install")

+		# use the loop backing file as the parent disk
		device = loop_get_backing(source)
		if device is None:
			raise RuntimeError("no device for grub install")
	cmds.append(device)

+	# run grub-install
	ret = ctx.run_external(cmds)
	if ret != 0: raise OSError("grub-install failed")

+	# copy the installed grub target from mount to rootfs
	src = os.path.join(mnt_install, "grub")
	dst = os.path.join(root, install, "grub")
	shutil.copytree(src, dst, dirs_exist_ok=True)


def proc_install(ctx: ArchBuilderContext):
+	"""
+	Process GRUB target installs
+	"""
	targets: list[str] = ctx.get("grub.targets", [])
	for target in targets:
		if target == "i386-pc":
@@ -281,12 +431,18 @@ def proc_install(ctx: ArchBuilderContext):


def proc_grub(ctx: ArchBuilderContext):
+	"""
+	Install the GRUB bootloader
+	"""
	root = ctx.get_rootfs()

+	# get the grub install folder in rootfs (WORKSPACE/TARGET/rootfs/boot/grub)
	install: str = ctx.get("grub.path", "/boot/grub")
	if install.startswith("/"):
		install = install[1:]
	install = os.path.join(root, install)
	os.makedirs(install, mode=0o0755, exist_ok=True)

	proc_config(ctx, install)
	proc_targets(ctx, install)
	proc_install(ctx)

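Pulling the keys used throughout this file together, a minimal grub/bootloader section of the build config might look like the sketch below; the structure is inferred from the ctx.get() calls above and the values are examples, not taken from the repository:

config = {
	"grub": {
		"targets": ["x86_64-efi", "i386-pc"],
		"path": "/boot/grub",
	},
	"bootloader": {
		"timeout": 5,
		"items": [
			{"name": "Arch Linux", "kernel": "/vmlinuz-linux",
			 "initramfs": "/initramfs-linux.img", "path": "/boot", "default": True},
		],
	},
}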
@@ -8,49 +8,77 @@ log = getLogger(__name__)


def reset_locale(ctx: ArchBuilderContext):
+	"""
+	Remove old locale settings
+	"""
	root = ctx.get_rootfs()
	archive = os.path.join(root, "usr/lib/locale/locale-archive")
	if os.path.exists(archive): os.remove(archive)


def enable_all(ctx: ArchBuilderContext):
+	"""
+	Add all enabled locales for building the locale-archive
+	"""
	root = ctx.get_rootfs()

+	# defaults to none
	locales = ctx.get("locale.enable", [])
	log.info("setup enabled locale")

+	# create locale.gen
	file = os.path.join(root, "etc/locale.gen")
	with open_config(file) as f:
		for line in locales:
			log.debug(f"adding locale {line}")
			f.write(line)
-			f.write("\n")
+			f.write(os.linesep)
		if len(locales) == 0:
			f.write("# No any locales enabled\n")

+	# run locale-gen
	filesystem.chroot_run(ctx, "locale-gen")


def set_default(ctx: ArchBuilderContext):
+	"""
+	Set up the default locale
+	"""
	root = ctx.get_rootfs()
-	default = ctx.get("locale.default", None)
-	if default is None: default = "C"
+	# default to C
+	default = ctx.get("locale.default", "C")
	log.info(f"default locale: {default}")

+	# default locale config (see man:locale.conf(5))
	conf = os.path.join(root, "etc/locale.conf")
	with open_config(conf) as f:
		f.write(f"LANG={default}\n")


def set_timezone(ctx: ArchBuilderContext):
+	"""
+	Set up tzdata timezone info
+	"""
	root = ctx.get_rootfs()
-	timezone = ctx.get("timezone", None)
-	if timezone is None: timezone = "UTC"
+	# default to UTC
+	timezone = ctx.get("timezone", "UTC")
	log.info(f"timezone: {timezone}")

+	# tzdata install path
	dst = os.path.join("/usr/share/zoneinfo", timezone)
	real = os.path.join(root, dst[1:])
	if not os.path.exists(real): raise ArchBuilderConfigError(
		f"timezone {timezone} not found"
	)

+	# localtime symbolic link (see man:tzset(3))
	lnk = os.path.join(root, "etc/localtime")
	if os.path.exists(lnk): os.remove(lnk)
	os.symlink(dst, lnk)

+	# timezone file
	conf = os.path.join(root, "etc/timezone")
	with open(conf, "w") as f:
		f.write(timezone)
@@ -58,6 +86,9 @@ def set_timezone(ctx: ArchBuilderContext):


def proc_locale(ctx: ArchBuilderContext):
+	"""
+	Set up user locale settings
+	"""
	reset_locale(ctx)
	enable_all(ctx)
	set_default(ctx)

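A hypothetical locale section of the build config, and what the functions above would do with it (the shape is inferred from the ctx.get() keys; the values are invented):

config = {
	"locale": {
		"enable": ["en_US.UTF-8 UTF-8", "zh_CN.UTF-8 UTF-8"],
		"default": "en_US.UTF-8",
	},
	"timezone": "Asia/Shanghai",
}
# enable_all() writes each "enable" entry as a line of /etc/locale.gen and runs
# locale-gen; set_default() writes "LANG=en_US.UTF-8" to /etc/locale.conf;
# set_timezone() links /etc/localtime to /usr/share/zoneinfo/Asia/Shanghai.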
@@ -17,22 +17,37 @@ def add_values(ctx: ArchBuilderContext, key: str, arr: list[str]):


def gen_config(ctx: ArchBuilderContext):
+	"""
+	Generate mkinitcpio.conf
+	"""
	modules: list[str] = []
	binaries: list[str] = []
	files: list[str] = []
	hooks: list[str] = []

+	# add default hooks
	hooks.append("base")
	hooks.append("systemd")
	hooks.append("autodetect")

+	# add microcode on x86_64 (amd-ucode, intel-ucode)
	if ctx.cur_arch in ["x86_64", "i386"]:
		hooks.append("microcode")

	hooks.append("modconf")

+	# do not add keymap by default
	if ctx.get("mkinitcpio.hooks.keymap", False):
-		hooks.extend(["kms", "keyboard", "keymap", "consolefont"])
+		hooks.extend(["kms", "keymap", "consolefont"])
-	hooks.extend(["block", "filesystems", "fsck"])
+	hooks.extend(["keyboard", "block", "filesystems", "fsck"])

+	# add other values
	add_values(ctx, "mkinitcpio.modules", modules)
	add_values(ctx, "mkinitcpio.binaries", binaries)
	add_values(ctx, "mkinitcpio.files", files)

+	# write mkinitcpio.conf to the rootfs
	root = ctx.get_rootfs()
	cfg = os.path.join(root, "etc/mkinitcpio.conf")
	with open_config(cfg) as f:
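With the defaults above on x86_64 and mkinitcpio.hooks.keymap left off, the HOOKS line written below comes out as:

hooks = ["base", "systemd", "autodetect", "microcode", "modconf",
         "keyboard", "block", "filesystems", "fsck"]
print("HOOKS=(%s)" % " ".join(hooks))
# HOOKS=(base systemd autodetect microcode modconf keyboard block filesystems fsck)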
@@ -40,27 +55,47 @@ def gen_config(ctx: ArchBuilderContext):
		f.write("BINARIES=(%s)\n" % (" ".join(binaries)))
		f.write("FILES=(%s)\n" % (" ".join(files)))
		f.write("HOOKS=(%s)\n" % (" ".join(hooks)))
+		# TODO: add more options


def recreate_initrd(ctx: ArchBuilderContext, path: str):
+	"""
+	Actually run mkinitcpio
+	"""
	chroot_run(ctx, ["mkinitcpio", "-p", path])
+	# do not check the return value of mkinitcpio


def recreate_initrd_no_autodetect(ctx: ArchBuilderContext, path: str):
+	"""
+	Create a full initramfs without autodetect
+	At build time mkinitcpio cannot detect the needed modules, which would make the image unbootable.
+	"""
	tmp = os.path.join(ctx.get_rootfs(), "tmp")
	with NamedTemporaryFile("w", dir=tmp) as temp:

+		# copy the original preset
		with open(path, "r") as f:
			temp.write(f.read())

+		# skip autodetect
		temp.write("\ndefault_options=\"-S autodetect\"\n")
		temp.flush()

+		# run mkinitcpio (with the path as seen inside the rootfs)
		path = os.path.join("/tmp", os.path.basename(temp.name))
		recreate_initrd(ctx, path)


def recreate_initrds(ctx: ArchBuilderContext):
+	"""
+	Regenerate all initramfs
+	"""
	root = ctx.get_rootfs()
	no_autodetect = ctx.get("mkinitcpio.no_autodetect", True)
	folder = os.path.join(root, "etc/mkinitcpio.d")

+	# scan all initramfs presets and regenerate them
	for preset in os.listdir(folder):
		if not preset.endswith(".preset"): continue
		path = os.path.join(folder, preset)
@@ -69,5 +104,8 @@ def recreate_initrds(ctx: ArchBuilderContext):


def proc_mkinitcpio(ctx: ArchBuilderContext):
+	"""
+	Process mkinitcpio options
+	"""
	gen_config(ctx)
	recreate_initrds(ctx)

@@ -82,9 +82,11 @@ def init_mount(ctx: ArchBuilderContext):
		real = os.path.realpath(os.path.join(root, target))
		do_mount(ctx, source, real, fstype, options)
	try:
+		# ensure mount points are clean
		mnts = MountTab.parse_mounts()
		if any(mnts.find_folder(ctx.work)):
			raise RuntimeError("mount points not cleanup")

		root_mount("proc", "proc", "proc", "nosuid,noexec,nodev")
		root_mount("sys", "sys", "sysfs", "nosuid,noexec,nodev,ro")
		root_mount("dev", "dev", "devtmpfs", "mode=0755,nosuid")
@@ -92,6 +94,8 @@ def init_mount(ctx: ArchBuilderContext):
		root_mount("shm", "dev/shm", "tmpfs", "mode=1777,nosuid,nodev")
		root_mount("run", "run", "tmpfs", "nosuid,nodev,mode=0755")
		root_mount("tmp", "tmp", "tmpfs", "mode=1777,strictatime,nodev,nosuid")

+		# symbolic links for some script tools (e.g. mkinitcpio)
		symlink("/proc/self/fd", "dev", "fd")
		symlink("/proc/self/fd/0", "dev", "stdin")
		symlink("/proc/self/fd/1", "dev", "stdout")

@@ -7,6 +7,9 @@ log = getLogger(__name__)


def gen_machine_info(ctx: ArchBuilderContext):
+	"""
+	Generate /etc/machine-info for systemd
+	"""
	root = ctx.get_rootfs()
	file = os.path.join(root, "etc/machine-info")
	cfg = ctx.get("sysconf")
@@ -22,6 +25,9 @@ def gen_machine_info(ctx: ArchBuilderContext):


def gen_hosts(ctx: ArchBuilderContext):
+	"""
+	Generate /etc/hosts
+	"""
	addrs: list[str] = []
	root = ctx.get_rootfs()
	file = os.path.join(root, "etc/hosts")
@@ -33,6 +39,8 @@ def gen_hosts(ctx: ArchBuilderContext):
			addrs.append(s[0])
			f.write(addr)
			f.write(os.linesep)

+		# 127.0.1.1 is not set; add it for the FQDN
		name = ctx.get("sysconf.hostname")
		if "127.0.1.1" not in addrs and name:
			f.write(f"127.0.1.1 {name}\n")
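For a host named "builder" with no explicit 127.0.1.1 entry, the resulting /etc/hosts would look roughly like the sketch below; the loopback lines are assumed to come from the configured host entries, not from this commit:

hosts = (
	"127.0.0.1 localhost\n"
	"::1 localhost\n"
	"127.0.1.1 builder\n"   # appended because no 127.0.1.1 entry was configured
)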
@@ -40,6 +48,9 @@ def gen_hosts(ctx: ArchBuilderContext):


def gen_hostname(ctx: ArchBuilderContext):
+	"""
+	Generate /etc/hostname
+	"""
	root = ctx.get_rootfs()
	file = os.path.join(root, "etc/hostname")
	name = ctx.get("sysconf.hostname")
@@ -51,6 +62,9 @@ def gen_hostname(ctx: ArchBuilderContext):


def gen_environments(ctx: ArchBuilderContext):
+	"""
+	Generate /etc/environment
+	"""
	root = ctx.get_rootfs()
	file = os.path.join(root, "etc/environment")
	envs: dict[str] = ctx.get("sysconf.environments", [])
@@ -62,6 +76,9 @@ def gen_environments(ctx: ArchBuilderContext):


def proc_names(ctx: ArchBuilderContext):
+	"""
+	Apply names for system configs
+	"""
	gen_machine_info(ctx)
	gen_environments(ctx)
	gen_hostname(ctx)

@@ -7,6 +7,9 @@ log = getLogger(__name__)


def install_all(ctx: ArchBuilderContext, pacman: Pacman):
+	"""
+	Install all pacman packages
+	"""
	packages = ctx.get("pacman.install", [])
	if len(packages) <= 0: return
	log.info("installing packages: %s", " ".join(packages))
@@ -14,15 +17,24 @@ def install_all(ctx: ArchBuilderContext, pacman: Pacman):


def install_all_keyring(ctx: ArchBuilderContext, pacman: Pacman):
+	"""
+	Install all pacman keyring packages before normal packages
+	"""
	packages: list[str] = ctx.get("pacman.install", [])
	if len(packages) <= 0: return

+	# find all keyring packages
	keyrings = [pkg for pkg in packages if pkg.endswith("-keyring")]
	if len(keyrings) <= 0: return

	log.info("installing keyrings: %s", " ".join(keyrings))
	pacman.add_trust_keyring_pkg(keyrings)


def uninstall_all(ctx: ArchBuilderContext, pacman: Pacman):
+	"""
+	Remove all specified pacman packages
+	"""
	packages = ctx.get("pacman.uninstall", [])
	if len(packages) <= 0: return
	log.info("uninstalling packages: %s", " ".join(packages))
@@ -30,6 +42,9 @@ def uninstall_all(ctx: ArchBuilderContext, pacman: Pacman):


def append_config(ctx: ArchBuilderContext, lines: list[str]):
+	"""
+	Generate the basic pacman.conf for the rootfs
+	"""
	lines.append("[options]\n")
	lines.append("HoldPkg = pacman glibc filesystem\n")
	lines.append(f"Architecture = {ctx.tgt_arch}\n")
@@ -43,6 +58,9 @@ def append_config(ctx: ArchBuilderContext, lines: list[str]):


def gen_config(ctx: ArchBuilderContext, pacman: Pacman):
+	"""
+	Generate the full pacman.conf for the rootfs
+	"""
	conf = os.path.join(ctx.get_rootfs(), "etc/pacman.conf")
	lines: list[str] = []
	append_config(ctx, lines)
@@ -53,17 +71,30 @@ def gen_config(ctx: ArchBuilderContext, pacman: Pacman):


def proc_pacman(ctx: ArchBuilderContext, pacman: Pacman):
+	"""
+	Install or remove packages for the rootfs, and generate pacman.conf
+	"""
	install_all(ctx, pacman)
	uninstall_all(ctx, pacman)
	gen_config(ctx, pacman)


def proc_pacman_keyring(ctx: ArchBuilderContext, pacman: Pacman):
+	"""
+	Install keyring packages early
+	"""
	install_all_keyring(ctx, pacman)


def trust_all(ctx: ArchBuilderContext, pacman: Pacman):
+	"""
+	Trust keys early for databases and keyring packages
+	"""
	if not ctx.gpgcheck: return
	trust = ctx.get("pacman.trust", [])

+	# receive all keys now
	pacman.recv_keys(trust)

+	# locally sign the keys
	for key in trust: pacman.lsign_key(key)

@@ -6,12 +6,19 @@ log = getLogger(__name__)


def proc_systemd(ctx: ArchBuilderContext):
+	"""
+	Enable or disable systemd unit files, and set the default target
+	"""
	systemd_comp.enable(ctx, ctx.get("systemd.enable", []))
	systemd_comp.disable(ctx, ctx.get("systemd.disable", []))
	systemd_comp.set_default(ctx, ctx.get("systemd.default", None))


def proc_machine_id(ctx: ArchBuilderContext):
+	"""
+	Remove or set machine-id
+	Never duplicate the machine id; it should be generated on first boot
+	"""
	id = ctx.get("machine-id", "")
	root = ctx.get_rootfs()
	mid = os.path.join(root, "etc/machine-id")

@@ -6,61 +6,94 @@ log = getLogger(__name__)


def proc_user(ctx: ArchBuilderContext, cfg: dict):
+	"""
+	Create a new user and set its password
+	"""
	if "name" not in cfg: raise ArchBuilderConfigError("username not set")
	name = cfg["name"]
	cmds = []
	if ctx.passwd.lookup_name(name) is None:
+		# the user does not exist, create it
		cmds.append("useradd")
-		cmds.append("-m")
+		cmds.append("-m") # create home
		action = "created"
	else:
+		# the user already exists, modify it
		cmds.append("usermod")
		action = "modified"

+	# add all options
	if "uid" in cfg: cmds.extend(["-u", str(cfg["uid"])])
	if "gid" in cfg: cmds.extend(["-g", str(cfg["gid"])])
	if "home" in cfg: cmds.extend(["-d", cfg["home"]])
	if "shell" in cfg: cmds.extend(["-s", cfg["shell"]])
	if "groups" in cfg: cmds.extend(["-G", str(cfg["groups"])])
	cmds.append(name)

+	# run useradd or usermod
	ret = chroot_run(ctx, cmds)
	if ret != 0: raise OSError(f"{cmds[0]} failed")

+	# set a password for the user if requested
	if "password" in cfg:
		cmds = ["chpasswd"]
		text = f"{name}:{cfg['password']}\n"
		ret = chroot_run(ctx, cmds, stdin=text)
		if ret != 0: raise OSError("chpasswd failed")

+	# reload the user database
	ctx.reload_passwd()
	log.info(f"{action} user {name}")


def proc_group(ctx: ArchBuilderContext, cfg: dict):
+	"""
+	Create a new group
+	"""
	if "name" not in cfg: raise ArchBuilderConfigError("groupname not set")
	name = cfg["name"]
	cmds = []
	if ctx.passwd.lookup_name(name) is None:
+		# the group does not exist, create it
		cmds.append("groupadd")
		action = "created"
	else:
+		# the group already exists, modify it
		cmds.append("groupmod")
		action = "modified"

+	# add all options
	if "gid" in cfg: cmds.extend(["-g", str(cfg["gid"])])
	cmds.append(name)

+	# run groupadd or groupmod
	ret = chroot_run(ctx, cmds)
	if ret != 0: raise OSError(f"{name} failed")

+	# reload the user database
	ctx.reload_passwd()
	log.info(f"{action} group {name}")


def proc_users(ctx: ArchBuilderContext):
+	"""
+	Create all users
+	"""
	for user in ctx.get("sysconf.user", []):
		proc_user(ctx, user)


def proc_groups(ctx: ArchBuilderContext):
+	"""
+	Create all groups
+	"""
	for group in ctx.get("sysconf.group", []):
		proc_group(ctx, group)


def proc_usergroup(ctx: ArchBuilderContext):
-	proc_groups(ctx)
+	"""
+	Create all users and groups
+	"""
+	proc_groups(ctx) # create groups before users
	proc_users(ctx)

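A hypothetical sysconf.user entry and the command proc_user would build from it (all values invented for illustration):

user_cfg = {
	"name": "alarm",
	"uid": 1000,
	"groups": "wheel,video",
	"shell": "/bin/bash",
	"password": "alarm",
}
# For a user that does not exist yet this becomes:
#   useradd -m -u 1000 -G wheel,video -s /bin/bash alarm
# followed by feeding "alarm:alarm\n" to chpasswd inside the chroot.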
@@ -39,6 +39,9 @@ class Pacman:
	caches: list[str]

	def append_repos(self, lines: list[str]):
+		"""
+		Add all databases into the config
+		"""
		for repo in self.databases:
			db = self.databases[repo]
			lines.append(f"[{repo}]\n")
@@ -47,6 +50,9 @@ class Pacman:
				lines.append(f"Server = {server}\n")

	def append_config(self, lines: list[str]):
+		"""
+		Add the basic pacman config for the host
+		"""
		siglevel = ("Required DatabaseOptional" if self.ctx.gpgcheck else "Never")
		lines.append("[options]\n")
		for cache in self.caches:
@@ -66,6 +72,9 @@ class Pacman:
		self.append_repos(lines)

	def init_keyring(self):
+		"""
+		Initialize the pacman keyring
+		"""
		path = os.path.join(self.ctx.work, "rootfs")
		keyring = os.path.join(path, "etc/pacman.d/gnupg")
		if not self.ctx.gpgcheck: return
@@ -76,6 +85,9 @@ class Pacman:
		self.pacman_key(["--init"])

	def init_config(self):
+		"""
+		Create the host pacman.conf
+		"""
		config = os.path.join(self.ctx.work, "pacman.conf")
		if os.path.exists(config):
			os.remove(config)
@@ -88,6 +100,9 @@ class Pacman:
			f.writelines(lines)

	def pacman_key(self, args: list[str]):
+		"""
+		Call pacman-key for the rootfs
+		"""
		if not self.ctx.gpgcheck:
			raise RuntimeError("GPG check disabled")
		keyring = os.path.join(self.root, "etc/pacman.d/gnupg")
@@ -100,6 +115,9 @@ class Pacman:
		if ret != 0: raise OSError(f"pacman-key failed with {ret}")

	def pacman(self, args: list[str]):
+		"""
+		Call pacman for the rootfs
+		"""
		config = os.path.join(self.ctx.work, "pacman.conf")
		cmds = ["pacman"]
		cmds.append("--noconfirm")
@@ -110,20 +128,32 @@ class Pacman:
		if ret != 0: raise OSError(f"pacman failed with {ret}")

	def add_database(self, repo: dict):
+		"""
+		Add a database and update it
+		"""
		def resolve(url: str) -> str:
+			"""
+			Replace pacman.conf variables
+			"""
			return (url
				.replace("$arch", self.ctx.tgt_arch)
				.replace("$repo", name))
		if "name" not in repo:
			raise ArchBuilderConfigError("repo name not set")
		name = repo["name"]

+		# never add 'local' as a database
		if name == "local" or "/" in name:
			raise ArchBuilderConfigError("bad repo name")

+		# register the database
		if name not in self.databases:
			self.databases[name] = self.handle.register_syncdb(
				name, pyalpm.SIG_DATABASE_MARGINAL_OK
			)
		db = self.databases[name]

+		# add the database servers
		servers: list[str] = []
		if "server" in repo:
			servers.append(resolve(repo["server"]))
@ -131,10 +161,15 @@ class Pacman:
|
|||||||
for server in repo["servers"]:
|
for server in repo["servers"]:
|
||||||
servers.append(resolve(server))
|
servers.append(resolve(server))
|
||||||
db.servers = servers
|
db.servers = servers
|
||||||
|
|
||||||
|
# update database now via pyalpm
|
||||||
log.info(f"updating database {name}")
|
log.info(f"updating database {name}")
|
||||||
db.update(False)
|
db.update(False)
|
||||||
|
|
||||||
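
The resolve() helper above only does plain string substitution on the configured server URL. A standalone sketch of the same behaviour, not part of this commit (the mirror URL is purely an example):

def resolve(url: str, arch: str, repo: str) -> str:
    # substitute the pacman.conf style variables with concrete values
    return url.replace("$arch", arch).replace("$repo", repo)

# a typical mirror template expands to one URL per database
print(resolve("http://mirror.example.org/$arch/$repo", "aarch64", "core"))
# -> http://mirror.example.org/aarch64/core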
def load_databases(self):
"""
Add all databases and load them
"""
cfg = self.config
if "repo" not in cfg:
raise ArchBuilderConfigError("no repos found in config")
@ -144,21 +179,33 @@ class Pacman:
self.refresh()

def lookup_package(self, name: str) -> list[pyalpm.Package]:
"""
Lookup pyalpm package by name
"""

# pass a filename, load it directly
if ".pkg.tar." in name:
pkg = self.handle.load_pkg(name)
if pkg is None: raise RuntimeError(f"load package {name} failed")
return [pkg]

s = name.split("/")
if len(s) == 2:
# DATABASE/PACKAGE form, find it in that database
if s[0] not in self.databases and s[0] != "local":
raise ValueError(f"database {s[0]} not found")
db = (self.handle.get_localdb() if s[0] == "local" else self.databases[s[0]])
pkg = db.get_pkg(s[1])
if pkg: return [pkg]
raise ValueError(f"package {s[1]} not found")
elif len(s) == 1:
# plain PACKAGE form, search all databases or treat as a group

# try to find it as a group
pkg = pyalpm.find_grp_pkgs(self.databases.values(), name)
if len(pkg) > 0: return pkg

# try to find it as a package
for dbn in self.databases:
db = self.databases[dbn]
pkg = db.get_pkg(name)
@ -167,18 +214,27 @@ class Pacman:
raise ValueError(f"bad package name {name}")
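
The name forms accepted by lookup_package() can be summarised with a small standalone sketch that mirrors its branch order (illustrative only; the real lookups go through pyalpm):

def classify_pkg_name(name: str) -> str:
    # same branch order as Pacman.lookup_package()
    if ".pkg.tar." in name:
        return "package archive, loaded directly from file"
    parts = name.split("/")
    if len(parts) == 2:
        return f"package {parts[1]} looked up only in database {parts[0]}"
    if len(parts) == 1:
        return "tried as a group first, then as a package in every database"
    return "bad package name"

print(classify_pkg_name("extra/vim"))  # package vim looked up only in database extra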
def init_cache(self):
"""
Initialize pacman cache folder
"""
host_cache = "/var/cache/pacman/pkg" # host cache
work_cache = os.path.join(self.ctx.work, "packages") # workspace cache
root_cache = os.path.join(self.root, "var/cache/pacman/pkg") # rootfs cache
self.caches.clear()

# if the host cache exists, use it as well
if os.path.exists(host_cache):
self.caches.append(host_cache)

self.caches.append(work_cache)
self.caches.append(root_cache)
os.makedirs(work_cache, mode=0o0755, exist_ok=True)
os.makedirs(root_cache, mode=0o0755, exist_ok=True)
def __init__(self, ctx: ArchBuilderContext):
"""
Initialize pacman context
"""
self.ctx = ctx
if "pacman" not in ctx.config:
raise ArchBuilderConfigError("no pacman found in config")
@ -200,6 +256,9 @@ class Pacman:
self.init_config()

def uninstall(self, pkgs: list[str]):
"""
Uninstall packages via pacman
"""
if len(pkgs) == 0: return
ps = " ".join(pkgs)
log.info(f"removing packages {ps}")
@ -215,6 +274,9 @@ class Pacman:
asdeps: bool = False,
nodeps: bool = False,
):
"""
Install packages via pacman
"""
if len(pkgs) == 0: return
core_db = "var/lib/pacman/sync/core.db"
if not os.path.exists(os.path.join(self.root, core_db)):
@ -229,6 +291,9 @@ class Pacman:
self.pacman(args)

def download(self, pkgs: list[str]):
"""
Download packages via pacman
"""
if len(pkgs) == 0: return
core_db = "var/lib/pacman/sync/core.db"
if not os.path.exists(os.path.join(self.root, core_db)):
@ -239,6 +304,9 @@ class Pacman:
self.pacman(args)

def install_local(self, files: list[str]):
"""
Install local packages via pacman
"""
if len(files) == 0: return
log.info("installing local packages %s", " ".join(files))
args = ["--needed", "--upgrade"]
@ -246,12 +314,18 @@ class Pacman:
self.pacman(args)

def refresh(self, /, force: bool = False):
"""
Update local databases via pacman
"""
log.info("refresh pacman database")
args = ["--sync", "--refresh"]
if force: args.append("--refresh")
self.pacman(args)

def recv_keys(self, keys: str | list[str]):
"""
Receive a key via pacman-key
"""
args = ["--recv-keys"]
if type(keys) is str:
args.append(keys)
@ -262,6 +336,9 @@ class Pacman:
self.pacman_key(args)

def lsign_key(self, key: str):
"""
Locally sign a key via pacman-key
"""
self.pacman_key(["--lsign-key", key])

def pouplate_keys(
@ -269,6 +346,9 @@ class Pacman:
names: str | list[str] = None,
folder: str = None
):
"""
Populate all keys via pacman-key
"""
args = ["--populate"]
if folder: args.extend(["--populate-from", folder])
if names is None: pass
@ -278,31 +358,49 @@ class Pacman:
self.pacman_key(args)

def find_package_file(self, pkg: pyalpm.Package) -> str | None:
"""
Find the pacman package archive file in cache
"""
for cache in self.caches:
p = os.path.join(cache, pkg.filename)
if os.path.exists(p): return p
return None

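Because init_cache() fills self.caches in the order host cache, workspace cache, rootfs cache, find_package_file() effectively returns the first hit in that order. A minimal standalone sketch of the same search, not taken from the repository (the paths and the package filename are illustrative):

import os

def find_in_caches(caches: list[str], filename: str) -> str | None:
    # return the first cache directory that already holds the archive
    for cache in caches:
        p = os.path.join(cache, filename)
        if os.path.exists(p):
            return p
    return None

caches = ["/var/cache/pacman/pkg", "/tmp/work/packages", "/tmp/work/rootfs/var/cache/pacman/pkg"]
print(find_in_caches(caches, "archlinux-keyring-1-1-any.pkg.tar.zst"))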
def trust_keyring_pkg(self, pkg: pyalpm.Package):
"""
Trust a keyring package from file without installing it
"""
if not self.ctx.gpgcheck: return
names: list[str] = []
target = os.path.join(self.ctx.work, "keyrings")
keyring = "usr/share/pacman/keyrings/"

# find out file path
path = self.find_package_file(pkg)

# cleanup keyring extract folder
if os.path.exists(target):
shutil.rmtree(target)
os.makedirs(target, mode=0o0755)
if path is None: raise RuntimeError(
f"package {pkg.name} not found"
)

# open keyring package to extract
log.debug(f"processing keyring package {pkg.name}")
with libarchive.file_reader(path) as archive:
for file in archive:
pn: str = file.pathname
if not pn.startswith(keyring): continue

# get the filename of the file
fn = pn[len(keyring):]
if len(fn) <= 0: continue

# add keyring name to populate
if fn.endswith(".gpg"): names.append(fn[:-4])

# extract file
dest = os.path.join(target, fn)
log.debug(f"extracting {pn} to {dest}")
with open(dest, "wb") as f:
@ -311,9 +409,14 @@ class Pacman:
fd = f.fileno()
os.fchmod(fd, file.mode)
os.fchown(fd, file.uid, file.gid)

# trust extracted keyring
self.pouplate_keys(names, target)

def add_trust_keyring_pkg(self, pkgnames: list[str]):
"""
Trust keyring packages from file without installing them
"""
if not self.ctx.gpgcheck: return
if len(pkgnames) <= 0: return
self.download(pkgnames)
@ -4,13 +4,18 @@ from builder.lib.context import ArchBuilderContext


def systemctl(ctx: ArchBuilderContext, args: list[str]):
"""
Call systemctl in rootfs
"""
path = ctx.get_rootfs()
full_args = ["systemctl"]
if utils.have_external("systemctl"):
# use host systemctl if possible
full_args.append(f"--root={path}")
full_args.extend(args)
ret = ctx.run_external(full_args)
else:
# if host systemctl is unavailable, run via chroot instead
full_args.extend(args)
ret = filesystem.chroot_run(ctx, full_args)
if ret != 0: raise OSError(
@ -20,6 +25,9 @@ def systemctl(ctx: ArchBuilderContext, args: list[str]):


def enable(ctx: ArchBuilderContext, units: list[str]):
"""
Enable systemd units
"""
if len(units) <= 0: return
args = ["enable", "--"]
args.extend(units)
@ -27,6 +35,9 @@ def enable(ctx: ArchBuilderContext, units: list[str]):


def disable(ctx: ArchBuilderContext, units: list[str]):
"""
Disable systemd units
"""
if len(units) <= 0: return
args = ["disable", "--"]
args.extend(units)
@ -34,5 +45,8 @@ def disable(ctx: ArchBuilderContext, units: list[str]):


def set_default(ctx: ArchBuilderContext, unit: str):
"""
Set default boot target for systemd
"""
if not unit: return
systemctl(ctx, ["set-default", "--", unit])
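
In practice the two branches above mean the units are manipulated offline when the host has systemd, and inside the image otherwise. A rough standalone equivalent, not from the repository (unit name and rootfs path are illustrative; the builder itself goes through ctx.run_external() and filesystem.chroot_run()):

import subprocess

rootfs = "/tmp/work/odin2/rootfs"  # illustrative path

# host systemctl available: operate on the image without entering it
subprocess.run(["systemctl", f"--root={rootfs}", "enable", "--", "sshd.service"], check=True)

# otherwise run the same systemctl arguments inside the rootfs
subprocess.run(["chroot", rootfs, "systemctl", "enable", "--", "sshd.service"], check=True)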
@ -62,9 +62,13 @@ def parse_user_from(
node: dict,
default: tuple[int, int] = (0, -1)
) -> tuple[int, int]:
"""
Parse user/group id from config
"""
uid, gid = default
if "owner" in node: uid, gid = parse_owner(ctx, node["owner"])
uid = parse_usergroup_from(ctx, node, False, uid)
gid = parse_usergroup_from(ctx, node, True, gid)
if gid == -1:
user = ctx.passwd.lookup_uid(uid)
if user is None: raise ArchBuilderConfigError(
@ -23,10 +23,16 @@ class Area(SerializableDict):
}

def reset(self) -> Self:
"""
Remove all fields
"""
self.set(-1, -1, -1)
return self

def from_dict(self, o: dict) -> Self:
"""
Load all fields from config
"""
self.reset()
if "start" in o: self.start = size_to_bytes(o["start"])
if "offset" in o: self.start = size_to_bytes(o["offset"])
@ -36,6 +42,9 @@ class Area(SerializableDict):
return self

def is_area_in(self, area: Self) -> bool:
"""
Is another area fully inside this area
"""
self.fixup()
area.fixup()
return (
@ -45,6 +54,9 @@ class Area(SerializableDict):
)

def fixup(self) -> Self:
"""
Fill missing fields
"""
if self.start >= 0 and self.end >= 0 and self.start > self.end + 1:
raise ValueError("start large than end")
if 0 <= self.end < self.size and self.size >= 0:
@ -52,17 +64,20 @@ class Area(SerializableDict):
if self.start >= 0 and self.end >= 0 and self.size >= 0:
if self.size != self.end - self.start + 1:
raise ValueError("bad size")
elif self.start >= 0 and self.end >= 0: # need size
self.size = self.end - self.start + 1
elif self.start >= 0 and self.size >= 0: # need end
self.end = self.start + self.size - 1
elif self.end >= 0 and self.size >= 0: # need start
self.start = self.end - self.size + 1
else:
raise ValueError("missing value")
return self

def __init__(self, start: int = -1, end: int = -1, size: int = -1, area: Self = None):
"""
Initialize an area
"""
super().__init__()
if area: start, end, size = area.to_tuple()
self.start, self.end, self.size = start, end, size
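
The bookkeeping in Area.fixup() is ordinary inclusive-range arithmetic: any two of start, end and size determine the third via size = end - start + 1. A small standalone check, not part of this commit (values are arbitrary):

def complete_area(start: int = -1, end: int = -1, size: int = -1) -> tuple[int, int, int]:
    # fill the missing field, mirroring Area.fixup()
    if start >= 0 and end >= 0 and size < 0:
        size = end - start + 1
    elif start >= 0 and size >= 0 and end < 0:
        end = start + size - 1
    elif end >= 0 and size >= 0 and start < 0:
        start = end - size + 1
    return start, end, size

assert complete_area(start=0x1000, size=0x1000) == (0x1000, 0x1FFF, 0x1000)
assert complete_area(start=0x1000, end=0x1FFF) == (0x1000, 0x1FFF, 0x1000)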
@ -78,9 +93,15 @@ def to_tuple(start: int = -1, end: int = -1, size: int = -1, area: Area = None)

class Areas(list[Area], SerializableList):
def is_area_in(self, area: Area) -> bool:
"""
Is an area fully inside these areas
"""
return any(pool.is_area_in(area) for pool in self)

def merge(self) -> Self:
"""
Merge all areas
"""
idx = 0
self.sort(key=lambda x: (x.start, x.end))
while len(self) > 0:
@ -91,6 +112,7 @@ class Areas(list[Area], SerializableList):
if idx > 0:
last = self[idx - 1]
if last.end + 1 >= curr.start:
# last end touches or overlaps this start
ent = Area(last.start, curr.end)
ent.fixup()
self.remove(last)
@ -108,6 +130,9 @@ class Areas(list[Area], SerializableList):
size: int = -1,
area: Area = None,
) -> Area | None:
"""
Lookup an area with fields
"""
start, end, size = to_tuple(start, end, size, area)
for area in self:
if not (area.start <= start <= area.end): continue
@ -117,6 +142,9 @@ class Areas(list[Area], SerializableList):
return None

def align(self, align: int) -> Self:
"""
Align all fields to value
"""
self.sort(key=lambda x: (x.start, x.end))
for area in self:
start = round_up(area.start, align)
@ -136,6 +164,9 @@ class Areas(list[Area], SerializableList):
size: int = -1,
area: Area = None
) -> Area | None:
"""
Add an area to this areas
"""
if area: start, end, size = area.to_tuple()
cnt = (start >= 0) + (end >= 0) + (size >= 0)
if cnt < 2: raise ValueError("missing value")
@ -151,6 +182,9 @@ class Areas(list[Area], SerializableList):
size: int = -1,
area: Area = None,
) -> bool:
"""
Remove a range from areas
"""
start, end, size = to_tuple(start, end, size, area)
if len(self) <= 0: return False
rs = min(area.start for area in self)
@ -174,6 +208,9 @@ class Areas(list[Area], SerializableList):
area: Area = None,
biggest: bool = True,
) -> Area | None:
"""
Find matched area
"""
if area: start, end, size = area.to_tuple()
cnt = (start >= 0) + (end >= 0) + (size >= 0)
if cnt >= 2:
@ -11,27 +11,45 @@ class CGroup:

@property
def path(self) -> str:
"""
Get full path of this cgroup
"""
return os.path.join(self.fs, self.name)

@property
def valid(self) -> bool:
"""
Can read or write to this cgroup
"""
return os.path.exists(self.path)

def create(self):
"""
Create this cgroup now
"""
if self.valid: return
os.mkdir(self.path)

def destroy(self):
"""
Destroy the cgroup
"""
if not self.valid: return
os.rmdir(self.path)

def add_pid(self, pid: int):
"""
Add a pid to track
"""
if not self.valid: return
procs = os.path.join(self.path, "cgroup.procs")
with open(procs, "w") as f:
f.write(f"{pid}\n")

def list_pid(self) -> list[int]:
"""
List all tracked child process ids
"""
ret: list[int] = []
if not self.valid: return ret
procs = os.path.join(self.path, "cgroup.procs")
@ -41,22 +59,36 @@ class CGroup:
return ret

def kill_all(self, sig: int = signal.SIGTERM, timeout: int = 10, kill: int = 8):
"""
Kill all child processes and wait for them to exit
"""
if not self.valid: return
pids = self.list_pid()
remain = 0
while True:
# send a signal
for pid in pids:
log.debug(f"killing {pid}")
try: os.kill(pid, sig)
except: pass

# waitpid to clean up zombies
try: os.waitpid(-1, os.WNOHANG)
except: pass

# check whether all children have exited
pids = self.list_pid()
if len(pids) <= 0: break

# escalate to SIGKILL once the kill time is reached
if 0 < kill <= remain:
sig = signal.SIGKILL

# timed out, give up
if remain >= timeout:
raise TimeoutError("killing pids timedout")

# wait...
time.sleep(1)

def __init__(self, name: str, fs: str = None):
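
The shape of kill_all() is a common pattern: signal every tracked pid, reap zombies, re-check the group, and escalate from SIGTERM to SIGKILL after a grace period. A condensed standalone sketch of the same loop, not from the repository (the real method reads the pids from cgroup.procs):

import os
import time
import signal

def kill_group(list_pids, timeout: int = 10, kill_after: int = 8):
    sig = signal.SIGTERM
    waited = 0
    while True:
        for pid in list_pids():
            try: os.kill(pid, sig)
            except OSError: pass
        try: os.waitpid(-1, os.WNOHANG)  # reap any exited child
        except OSError: pass
        if not list_pids(): break
        if waited >= kill_after: sig = signal.SIGKILL  # escalate
        if waited >= timeout: raise TimeoutError("killing pids timed out")
        time.sleep(1)
        waited += 1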
@ -12,6 +12,9 @@ class ArchBuilderConfigError(Exception):


def _dict_merge(dst: dict, src: dict):
"""
Merge two dicts with override
"""
for key in src.keys():
st = type(src[key])
if key in dst and st is type(dst[key]):
@ -89,6 +89,9 @@ class ArchBuilderContext:
group: GroupFile = GroupFile()

def get(self, key: str, default=None):
"""
Get config value
"""
try: return dict_get(key, self.config)
except: return default

@ -104,6 +107,9 @@ class ArchBuilderContext:
self.cleanup()

def cleanup(self):
"""
Cleanup build context
"""
from builder.build.mount import undo_mounts
self.cgroup.kill_all()
self.cgroup.destroy()
@ -139,6 +145,9 @@ class ArchBuilderContext:
return ret

def reload_passwd(self):
"""
Reload user database
"""
root = self.get_rootfs()
pf = os.path.join(root, "etc/passwd")
gf = os.path.join(root, "etc/group")
@ -148,9 +157,15 @@ class ArchBuilderContext:
if os.path.exists(gf): self.group.load_file(gf)

def finish_config(self):
"""
Finish loading configs
"""
self.config_orig = deepcopy(self.config)

def resolve_subscript(self):
"""
Run subscript replaces
"""
ss = SubScript()
self.config = deepcopy(self.config_orig)
ss.parse(self.config)
@ -4,6 +4,9 @@ from builder.lib import serializable


class SerializableEncoder(json.JSONEncoder):
"""
JSON implementation of the serializable interface
"""
def default(self, o):
if isinstance(o, UUID):
return str(o)
@ -4,7 +4,7 @@ import libmount
from typing import Self
from logging import getLogger
from builder.lib.blkid import Blkid
from builder.lib.serializable import SerializableDict, SerializableList
log = getLogger(__name__)

virtual_fs = [
@ -33,6 +33,9 @@ class MountPoint(SerializableDict):

@property
def virtual(self) -> bool:
"""
Is the current mount point a virtual filesystem
"""
if self.fstype:
if self.fstype in virtual_fs: return True
if self.fstype in real_fs: return False
@ -45,6 +48,12 @@ class MountPoint(SerializableDict):

@property
def level(self) -> int:
"""
Get current target level
/ => 1
/boot => 2
/usr/bin => 3
"""
if self.target is None: return 0
path = os.path.realpath(self.target)
cnt = path.count(os.sep)
@ -56,13 +65,22 @@ class MountPoint(SerializableDict):

@property
def options(self):
"""
Get options as string
"""
return ",".join(self.option)

@options.setter
def options(self, val: str):
"""
Set options from string
"""
self.option = val.split(",")

def get_option(self, opt: str) -> str | None:
"""
Get an option from string
"""
if opt in self.option:
return opt
if "=" not in opt:
@ -72,6 +90,9 @@ class MountPoint(SerializableDict):
return None

def remove_option(self, opt: str | list[str]) -> Self:
"""
Remove an option
"""
if type(opt) is list[str]:
for o in opt:
self.remove_option(o)
@ -86,11 +107,17 @@ class MountPoint(SerializableDict):
return self

def exclusive_option(self, opt: str, opt1: str, opt2: str) -> Self:
"""
Remove an exclusive option
"""
if opt == opt1 or opt == opt2:
self.remove_option(opt1)
return self

def add_option(self, opt: str) -> Self:
"""
Add an option
"""
self.exclusive_option(opt, "ro", "rw")
self.exclusive_option(opt, "dev", "nodev")
self.exclusive_option(opt, "suid", "nosuid")
@ -102,10 +129,16 @@ class MountPoint(SerializableDict):
return self

def ro(self) -> Self:
"""
Set mount point to read-only
"""
self.add_option("ro")
return self

def rw(self) -> Self:
"""
Set mount point to read-write
"""
self.add_option("rw")
return self

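The options property simply joins and splits the option list with commas, which is what get_option() searches through. A standalone illustration, not part of this commit (the mount options are just examples):

option = "rw,noatime,subvol=@".split(",")  # what the options setter stores
print(",".join(option))                    # what the options getter returns
# -> rw,noatime,subvol=@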
@ -115,6 +148,9 @@ class MountPoint(SerializableDict):
def have_options(self) -> bool: return len(self.option) > 0

def update_device(self):
"""
Update device field from source
"""
if self.virtual or self.source is None: return
if self.source.startswith(os.sep):
self.device = self.source
@ -124,6 +160,9 @@ class MountPoint(SerializableDict):
return

def persist_source(self, tag: str = "UUID"):
"""
Change source to a persistent source
"""
if self.virtual: return
if self.device is None: self.update_device()
if self.device is None: return
@ -136,6 +175,9 @@ class MountPoint(SerializableDict):
)

def tolibmount(self) -> libmount.Context:
"""
To util-linux libmount context
"""
mnt = libmount.Context()
mnt.target = self.target
if self.have_source(): mnt.source = self.source
@ -144,9 +186,15 @@ class MountPoint(SerializableDict):
return mnt

def ismount(self) -> bool:
"""
Is the current mount point mounted
"""
return os.path.ismount(self.target)

def mount(self) -> Self:
"""
Mount now
"""
if not os.path.exists(self.target):
os.makedirs(self.target, mode=0o0755)
if not os.path.ismount(self.target):
@ -161,6 +209,9 @@ class MountPoint(SerializableDict):
return self

def umount(self) -> Self:
"""
Unmount now
"""
if os.path.ismount(self.target):
lib = self.tolibmount()
lib.umount()
@ -168,6 +219,9 @@ class MountPoint(SerializableDict):
return self

def from_mount_line(self, line: str) -> Self:
"""
Load from mtab / fstab
"""
d = line.split()
if len(d) != 6:
raise ValueError("bad mount line")
@ -180,6 +234,10 @@ class MountPoint(SerializableDict):
return self

def to_mount_line(self) -> str:
"""
To mount tab line string
PARTLABEL=
"""
self.fixup()
fields = [
self.source,
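from_mount_line() and to_mount_line() work on the standard six-column fstab/mtab layout (Source Target FS-Type Options FS-Freq FS-Dump). A typical line split the same way, purely as an example:

line = "PARTLABEL=linux / ext4 rw,noatime 0 1"
source, target, fstype, options, freq, passno = line.split()
print(source, target, fstype)  # PARTLABEL=linux / ext4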
@ -246,6 +304,9 @@ class MountPoint(SerializableDict):

class MountTab(list[MountPoint], SerializableList):
def find_folder(self, folder: str) -> Self:
"""
Find mount points whose target starts with folder
"""
root = os.path.realpath(folder)
return [mnt for mnt in self if mnt.target.startswith(root)]

@ -254,12 +315,18 @@ class MountTab(list[MountPoint], SerializableList):
def find_fstype(self, fstype: str) -> Self: return [mnt for mnt in self if mnt.fstype == fstype]

def clone(self) -> Self:
"""
Fully clone a MountTab
"""
mnts = MountTab()
for mnt in self:
mnts.append(mnt.clone())
return mnts

def mount_all(self, prefix: str = None, mkdir: bool = False) -> Self:
"""
Mount all mount points
"""
for mnt in self:
m = mnt.clone()
if prefix:
@ -271,9 +338,15 @@ class MountTab(list[MountPoint], SerializableList):
return self

def resort(self):
"""
Sort mount points by path level
"""
self.sort(key=lambda x: (x.level, len(x.target), x.target))

def strip_virtual(self) -> Self:
"""
Remove all virtual filesystem mount points
"""
for mnt in self:
if mnt.virtual:
self.remove(mnt)
@ -288,6 +361,9 @@ class MountTab(list[MountPoint], SerializableList):
return self

def to_mount_file(self, linesep=os.linesep) -> str:
"""
Convert to mount file (fstab)
"""
ret = "# Source Target FS-Type Options FS-Freq FS-Dump"
ret += linesep
for point in self:
@ -4,10 +4,16 @@ from builder.lib.serializable import SerializableDict, SerializableList


def zero2empty(num: int) -> str:
"""
Convert number to string, return an empty string when zero
"""
return str(num) if num != 0 else ""


def none2empty(val: str) -> str:
"""
Return the string, or an empty string when None
"""
return val if val else ""


@ -25,6 +31,9 @@ class UserFile(SerializableList):
def unload(self): pass

def load_str(self, content: str | list[str]) -> Self:
"""
Load whole file as string
"""
if type(content) is str:
content = content.split("\n")
for line in content:
@ -13,13 +13,21 @@ def str_find_all(
start: typing.SupportsIndex | None = None,
end: typing.SupportsIndex | None = None,
) -> int:
"""
Find the closest match among multiple keys
"""
if type(keys) is str: return orig.find(keys, start, end)
result: list[int] = [orig.find(key, start, end) for key in keys]
while -1 in result: result.remove(-1)
return min(result, default=-1)


def parse_cmd_args(cmd: str | list[str]) -> list[str]:
"""
Parse command line to list
parse_cmd_args("ls -la /mnt") = ["ls", "-la", "/mnt"]
parse_cmd_args(["ls", "-la", "/mnt"]) = ["ls", "-la", "/mnt"]
"""
if type(cmd) is str: return shlex.split(cmd)
elif type(cmd) is list: return cmd
else: raise TypeError("unknown type for cmd")
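
str_find_all() returns the smallest non-negative index among all keys, i.e. the position of whichever key occurs first. For example, assuming the function above is in scope:

orig = "name=value,flag"
print(str_find_all(orig, ["=", ","]))  # 4  -> "=" occurs before ","
print(str_find_all(orig, [";", ","]))  # 10 -> only "," is found
print(str_find_all(orig, [";", "#"]))  # -1 -> no key present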
@ -49,16 +57,37 @@ def fd_get_path(fd: int) -> str | None:
fd_get_path(1) = "/dev/pts/0"
"""
link = f"/proc/self/fd/{fd}"

# does the target exist?
if not os.path.exists(link): return None

# read link of fd
path = os.readlink(link)

# must start with / (an absolute path)
if not path.startswith("/"): return None

# do not use memfd
if path.startswith("/memfd:"): return None

# do not use a deleted file
if path.endswith(" (deleted)"): return None

# target file does not exist (should not happen)
if not os.path.exists(path): return None

return path


def size_to_bytes(value: str | int, alt_units: dict = None) -> int:
"""
Convert human-readable size string to number
size_to_bytes("1MiB") = 1048576
size_to_bytes("4K") = 4096
size_to_bytes("64b") = 8
size_to_bytes(123) = 123
size_to_bytes("2048s", {'s': 512}) = 1048576
"""
units = {
'b': 0.125, 'bit': 0.125, 'bits': 0.125, 'Bit': 0.125, 'Bits': 0.125,
'B': 1, 'Byte': 1, 'Bytes': 1, 'bytes': 1, 'byte': 1,
@ -71,45 +100,89 @@ def size_to_bytes(value: str | int, alt_units: dict = None) -> int:
'z': 10**15, 'zB': 10**15, 'zb': 10**15, 'Z': 2**50, 'ZB': 2**50, 'ZiB': 2**50,
'y': 10**15, 'yB': 10**15, 'yb': 10**15, 'Y': 2**50, 'YB': 2**50, 'YiB': 2**50,
}
if type(value) is int:
# return number directly
return value
elif type(value) is str:
# add custom units
if alt_units: units.update(alt_units)

# find all matched units
matches = {unit: len(unit) for unit in units if value.endswith(unit)}

# find out the longest matched unit
max_unit = max(matches.values(), default=0)

# use the longest unit
unit = next((unit for unit in matches.keys() if matches[unit] == max_unit), None)

# get mul for target unit
mul = units[unit] if unit else 1.0

# convert string to target number
return int(float(value[:len(value)-max_unit].strip()) * mul)
else: raise TypeError("bad size value")


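The longest-match step matters because several units share a suffix: "1MiB" also ends with the unit "B", so the longest candidate must win and the numeric part is everything before it. A standalone illustration of the same idea, not part of this commit:

units = {"B": 1, "KiB": 2**10, "MiB": 2**20}
value = "1MiB"
matches = {u: len(u) for u in units if value.endswith(u)}   # {'B': 1, 'MiB': 3}
longest = max(matches, key=matches.get)                     # 'MiB'
print(int(float(value[:-len(longest)]) * units[longest]))   # 1048576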
def bytes_pad(b: bytes, size: int, trunc: bool = False, pad: bytes = b'\0') -> bytes:
"""
Pad bytes to a specified length
"""
l = len(b)

# if larger than the specified size, truncate
if l > size and trunc:
b = b[:size]

# if smaller than the specified size, pad
if l < size:
b += pad * (size - l)
return b


def round_up(value: int, align: int) -> int:
"""
Align up a number
round_up(0x2000, 0x1000) = 0x2000
round_up(0x2001, 0x1000) = 0x3000
round_up(0x1FFF, 0x1000) = 0x2000
"""
return (value + align - 1) & ~(align - 1)


def round_down(value: int, align: int) -> int:
"""
Align down a number
round_down(0x2000, 0x1000) = 0x2000
round_down(0x2001, 0x1000) = 0x2000
round_down(0x1FFF, 0x1000) = 0x1000
"""
return value & ~(align - 1)


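Both helpers rely on the usual mask trick, which is only valid when align is a power of two; with other alignments the result is not a true rounding. A quick check, assuming the two functions above are in scope:

print(round_up(0x2001, 0x1000))  # 0x3000, as documented
print(round_up(5, 4))            # 8
print(round_up(5, 3))            # 5 -- not 6, because 3 is not a power of two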
def open_config(path: str, mode=0o0644) -> io.TextIOWrapper:
"""
Open a config file for writing
If the original file exists, move it to FILE.dist
"""
dist = f"{path}.dist"
have_dist = False
if os.path.exists(dist):
# dist file already exists, no move
have_dist = True
elif os.path.exists(path):
# target file already exists, rename to dist
shutil.move(path, dist)
have_dist = True
# FIXME: should not move a previous write to dist

# open and truncate
flags = os.O_RDWR | os.O_CREAT | os.O_TRUNC
fd = os.open(path=path, flags=flags, mode=mode)
if fd < 0: raise IOError(f"open {path} failed")
try:
fp = os.fdopen(fd, "w")
# write a comment to tell the user the dist file was renamed
fp.write("# This file is auto generated by arch-image-builder\n")
if have_dist:
fn = os.path.basename(dist)
@ -119,10 +192,18 @@ def open_config(path: str, mode=0o0644) -> io.TextIOWrapper:
except:
os.close(fd)
raise
# file close is managed by the caller
return fp


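Typical use of open_config(): the first write renames any existing file to FILE.dist and starts a fresh, truncated file that begins with the generated-by comment; closing is left to the caller. A short sketch, assuming the function above is importable (the path and content are illustrative):

path = "/tmp/demo-rootfs/etc/locale.conf"   # illustrative target
with open_config(path) as f:                # a pre-existing file would move to locale.conf.dist
    f.write("LANG=en_US.UTF-8\n")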
def path_to_name(path: str) -> str:
"""
Convert path to an identifier
path_to_name("") = "empty"
path_to_name("/") = "rootfs"
path_to_name("/boot") = "boot"
path_to_name("/etc/fstab") = "etc-fstab"
"""
if path == "/": return "rootfs"
if path.startswith("/"): path = path[1:]
if len(path) <= 0: return "empty"
@ -1,3 +1,4 @@
# Default user for ArchLinux ARM
sysconf:
user:
- name: alarm
@ -1,3 +1,4 @@
# Default user for ArchLinux
sysconf:
user:
- name: arch
@ -1,5 +1,7 @@
# Users in wheel group can run root commands without password
filesystem:
files:
# polkit
- path: /etc/polkit-1/rules.d/99-wheel.rules
mode: 0640
content: |
@ -7,10 +9,12 @@ filesystem:
if(subject.isInGroup("wheel"))
return polkit.Result.YES;
});
# sudo
- path: /etc/sudoers.d/wheel
mode: 0640
content: |
%wheel ALL=(ALL:ALL) NOPASSWD: ALL
# Ensure sudo is installed
pacman:
install:
- sudo
@ -1,3 +1,4 @@
# GNOME desktop
pacman:
install:
- gnome
@ -6,4 +7,5 @@ systemd:
enable:
- gdm.service
also:
# Ensure NetworkManager is enabled
- packages/network-manager
@ -2,20 +2,36 @@ name: AYN Odin 2
arch: aarch64
soc: qcs8550
device: ayn-odin2

# hypdtbo for platform ABL boot
device_suffix: -hypdtbo
pacman:
install:

# For regulatory.db
- wireless-regdb

# Qualcomm firmwares for AYN Odin2
- linux-firmware-ayn-odin2

# Mainline kernel for AYN Odin2
- linux-ayn-odin2-edge

# Adreno 740 GPU
- mesa-qcom-git

# Vulkan test tools
- vulkan-tools

# Vulkan dependency
- xcb-util-keysyms
systemd:
disable:
# No modem in this device
- rmtfs.service
filesystem:
files:
# Gamepad workaround
- path: /etc/udev/rules.d/99-${device}.rules
content: |
SUBSYSTEM=="input", ATTRS{name}=="Ayn Odin2 Gamepad", MODE="0666", ENV{ID_INPUT_MOUSE}="0", ENV{ID_INPUT_JOYSTICK}="1"
@ -41,6 +57,7 @@ filesystem:
sysconf:
chassis: handset
environments:
# Force the zink driver for OpenGL
__GLX_VENDOR_LIBRARY_NAME: mesa
MESA_LOADER_DRIVER_OVERRIDE: zink
GALLIUM_DRIVER: zink
@ -53,6 +70,8 @@ kernel:
- allow_mismatched_32bit_el0
mkinitcpio:
files:
# Put these firmware files into the initramfs for boot
# TODO: do not put them into the initramfs
- /usr/lib/firmware/qcom/sm8550/ayn/odin2/adsp.mbn
- /usr/lib/firmware/qcom/sm8550/ayn/odin2/adsp_dtb.mbn
- /usr/lib/firmware/qcom/sm8550/ayn/odin2/cdsp.mbn
@ -72,8 +91,18 @@ mkinitcpio:
- os/archlinuxarm
- repo/archlinuxcn
- repo/renegade-project

# Generic qualcomm related config
- device/qcom

# USB Gadget tools
- packages/systemd-gadget

# OpenSSH Server
- packages/openssh

# Text editor
- packages/editor

# Bluetooth related services
- packages/bluez
@ -2,10 +2,19 @@ platform: qcom
device_suffix:
pacman:
install:
# Android A/B Slot Switcher
- qbootctl

# Qualcomm IPC Router
- qrtr

# Modem remote filesystem
- rmtfs

# TFTP via Qualcomm IPC Router
- tqftpserv

# Protection Domain Mapper
- pd-mapper
systemd:
enable:
@ -1,31 +1,44 @@
# I18N for Chinese (Simplified)
locale:
enable:
- "zh_CN.UTF-8 UTF-8"
- "en_US.UTF-8 UTF-8"
default: zh_CN.UTF-8

filesystem:
files:

# Wireless regulatory
- path: /etc/conf.d/wireless-regdom
content: |
WIRELESS_REGDOM="CN"

# China accelerated DNS
- path: /etc/systemd/resolved.conf.d/cn-dns.conf
content: |
[Resolve]
DNS=114.114.114.114 119.29.29.29
FallbackDNS=114.114.114.114 119.29.29.29

# China accelerated NTP server
- path: /etc/systemd/timesyncd.conf.d/cn-ntp.conf
content: |
[Time]
NTP=cn.ntp.org.cn

pacman:
install:
# Fonts for CJK languages
- noto-fonts-cjk
- wqy-bitmapfont
- wqy-microhei
- wqy-microhei-lite
- wqy-zenhei

# Input method for Chinese
- ibus
- ibus-libpinyin

sysconf:
environments:
GTK_IM_MODULE: ibus
@ -1,3 +1,4 @@
# Bluetooth
pacman:
install:
- bluez
@ -1,10 +1,13 @@
# USB Gadget tools
pacman:
install:
- systemd-gadget
- dnsmasq
systemd:
disable:
# Disable tty ACM to avoid bugs
- getty@ttyGS0.service
- usbgadget-func-acm.service
enable:
# Enable systemd-networkd for RNDIS
- systemd-networkd.service
@ -1,5 +1,6 @@
filesystem:
files:
# Network configs from archiso
- path: /etc/systemd/network/20-ethernet.network
content: |
[Match]