root@kali-pi4:/proc/sys/net/ipv4# apt update
Hit:1 http://kali.download/kali kali-rolling InRelease
Get:2 http://http.re4son-kernel.com/re4son kali-pi InRelease [8,133 B]
Err:2 http://http.re4son-kernel.com/re4son kali-pi InRelease
The following signatures were invalid: EXPKEYSIG 11764EE8AC24832F Carsten Boeving <carsten.boeving@whitedome.com.au>
Fetched 8,133 B in 2s (4,518 B/s)
Reading package lists... Done
Building dependency tree
Reading state information... Done
1010 packages can be upgraded. Run 'apt list --upgradable' to see them.
W: An error occurred during the signature verification. The repository is not updated and the previous index files will be used. GPG error: http://http.re4son-kernel.com/re4son kali-pi InRelease: The following signatures were invalid: EXPKEYSIG 11764EE8AC24832F Carsten Boeving <carsten.boeving@whitedome.com.au>
W: Failed to fetch http://http.re4son-kernel.com/re4son/dists/kali-pi/InRelease  The following signatures were invalid: EXPKEYSIG 11764EE8AC24832F Carsten Boeving <carsten.boeving@whitedome.com.au>
W: Some index files failed to download. They have been ignored, or old ones used instead.
root@kali-pi4:/proc/sys/net/ipv4# uname -a
Linux kali-pi4 4.19.81-Re4son-v8l+ #1 SMP PREEMPT Wed Nov 6 07:24:49 UTC 2019 aarch64 GNU/Linux
root@kali-pi4:/proc/sys/net/ipv4# gpg --keyserver hkp://keys.gnupg.net --recv-key AC24832F
gpg: keybox '/root/.gnupg/pubring.kbx' created
gpg: /root/.gnupg/trustdb.gpg: trustdb created
gpg: key 11764EE8AC24832F: public key "Carsten Boeving <carsten.boeving@whitedome.com.au>" imported
gpg: Total number processed: 1
gpg:               imported: 1
root@kali-pi4:/proc/sys/net/ipv4# gpg -a --export AC24832F | apt-key add -
OK
root@kali-pi4:/proc/sys/net/ipv4# apt update
Hit:1 http://kali.download/kali kali-rolling InRelease
Get:2 http://http.re4son-kernel.com/re4son kali-pi InRelease [8,133 B]
Get:3 http://http.re4son-kernel.com/re4son kali-pi/main arm64 Packages [13.8 kB]
Fetched 21.9 kB in 2s (9,870 B/s)
Reading package lists... Done
Building dependency tree
Reading state information... Done
1010 packages can be upgraded. Run 'apt list --upgradable' to see them.
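Note that apt-key and the old hkp://keys.gnupg.net keyserver are being phased out; a minimal sketch of the same fix on a newer apt, assuming keyserver.ubuntu.com still carries the key (the keyring filename here is just an illustrative choice):
gpg --keyserver hkps://keyserver.ubuntu.com --recv-key 11764EE8AC24832F
gpg --export 11764EE8AC24832F > /etc/apt/trusted.gpg.d/re4son-archive.gpg   # apt reads binary keyrings from this directory
apt update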
Tuesday, 3 November 2020
Friday, 10 July 2020
root@ies:~# pct list
can't open '/sys/fs/cgroup/blkio///lxc/.....' - No such file or directory
Upgrade Proxmox 5.4 to Proxmox 6
If yes, then try running
pve-efiboot-tool kernel list
to see the available/loaded kernels. Check the output to see which kernel is selected.
pve-efiboot-tool refresh
will copy found kernels and create boot entries. Afterwards you should be booting into the new kernel.
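For the 5.4 to 6 jump itself, Proxmox ships a pre-upgrade checker, and the simplest way to confirm you really landed on the new kernel after the reboot is uname; a quick sketch, assuming a standard PVE 6 setup:
pve5to6        # read-only checklist that flags known upgrade blockers
uname -r       # after rebooting, this should report the newer PVE kernel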
Wednesday, 3 July 2019
Add an SSD to Proxmox as LVM-thin storage
Adding a 480 GB SanDisk SSD
fdisk /dev/sdd
n   new primary partition
and set its type to 8e (Linux LVM)
pvcreate --metadatasize 250k -y -ff /dev/sdd1
vgcreate ssd480 /dev/sdd1
Create the logical volume and convert it to a thin pool:
lvcreate -L 420G -n lvm-ssd480 ssd480
lvconvert --type thin-pool ssd480/lvm-ssd480
root@ies:~# more /etc/pve/storage.cfg
lvmthin: localssd480
        thinpool lvm-ssd480
        vgname ssd480
        content images,rootdir
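Instead of editing /etc/pve/storage.cfg by hand, the same storage entry can be registered from the CLI; a sketch with pvesm, reusing the names above:
pvesm add lvmthin localssd480 --vgname ssd480 --thinpool lvm-ssd480 --content images,rootdir
pvesm status    # the new localssd480 storage should now show up as active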
root@ies:~# pvesm status
Name               Type      Status         Total         Used    Available       %
local              dir       active      98559220     80857492     12652180   82.04%
local-2t           lvmthin   active    1887436800    619456757   1267980042   32.82%
local-lvm          lvmthin   active     850669568    126579631    724089936   14.88%
local-ssdlvm       lvmthin   inactive           0            0            0    0.00%
localssd480        lvmthin   active     440401920            0    440401920    0.00%
maquetacio-rafa    cifs      active    1786248964   1022766164    763482800   57.26%
nas-dlink          nfs       active    3840539008   3817249152     23289856   99.39%
root@ies:~# pvesm scan lvm
vmdata
pve
ssd480
root@ies:~# pvesm lvmthinscan ssd480
lvm-ssd480
root@ies:~# pvesm lvmthinscan pve
data
root@ies:~# pvesm lvmthinscan vmdata
vmstore
root@ies:~# pvesm list local-lvm
local-lvm:vm-104-disk-1 raw 21474836480 104
local-lvm:vm-106-disk-1 raw 4294967296 106
local-lvm:vm-116-disk-1 raw 4294967296 116
local-lvm:vm-118-disk-1 raw 4294967296 118
local-lvm:vm-120-disk-1 raw 8589934592 120
local-lvm:vm-211-disk-1 raw 34359738368 211
local-lvm:vm-330-disk-1 raw 68719476736 330
local-lvm:vm-330-disk-2 raw 68719476736 330
local-lvm:vm-510-disk-1 raw 8589934592 510
local-lvm:vm-550-disk-0 raw 34359738368 550
local-lvm:vm-609-disk-0 raw 8589934592 609
local-lvm:vm-719-disk-0 raw 30064771072 719
local-lvm:vm-721-disk-0 raw 8589934592 721
local-lvm:vm-741-disk-0 raw 8589934592 741
Thursday, 25 October 2018
Firewall rule limited to a specific time window
iptables -I FORWARD 1 -p tcp --sport 53 -d 10.1.1.0/22 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -p tcp --sport 53 -d 10.1.23.0/24 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -p udp --sport 53 -d 10.1.1.0/22 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -p udp --sport 53 -d 10.1.23.0/24 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -p tcp --sport 53 -d 10.1.6.0/24 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -p udp --sport 53 -d 10.1.6.0/24 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -s 10.1.1.0/22 -p tcp --dport 53 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -s 10.1.23.0/24 -p tcp --dport 53 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -s 10.1.1.0/22 -p udp --dport 53 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -s 10.1.23.0/24 -p udp --dport 53 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -s 10.1.6.0/24 -p tcp --dport 53 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -s 10.1.6.0/24 -p udp --dport 53 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
Rules so that on Fridays, from 18:30 to 21:30, DNS queries can be made to the root servers or to any other external DNS server.
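The twelve rules above only differ in network and protocol, so a small loop produces the same set; a sketch assuming the same networks and schedule:
for net in 10.1.1.0/22 10.1.23.0/24 10.1.6.0/24; do
  for proto in tcp udp; do
    # replies from external DNS servers back into the local networks
    iptables -I FORWARD 1 -p $proto --sport 53 -d $net -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
    # queries from the local networks out to port 53
    iptables -I FORWARD 1 -s $net -p $proto --dport 53 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
  done
done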
Tuesday, 2 October 2018
ies kernel: audit: type=1400 audit(1538480598.585:12): apparmor="DENIED" operation="mount" info="failed mntpnt match" error=-13 profile="/usr/bin/lxc-start" name="/" pid=2180 comm="lxc-start" flags="rw, rslave"
Proxmox error after upgrading: the LXC containers won't start
pct start 400
AVC apparmor="DENIED" operation="mount" info="failed mntpnt match" error=-13 profile="/usr/bin/lxc-start" name="/" pid=2180 comm="lxc-start" flags="rw, rslave"
Oct 02 13:43:18 ies kernel: audit: type=1400 audit(1538480598.585:12): apparmor="DENIED" operation="mount" info="failed mntpnt match" error=-13 profile="/usr/bin/lxc-start" name="/" pid=2180 comm="lxc-start" flags="rw, rslave"
After following a few links about this error, this one worked for me:
https://forum.proxmox.com/threads/apparmor-preventing-lxcs-starting-after-update.42060/
I needed to manually add
deb http://download.proxmox.com/debian/pve stretch pve-no-subscription
then run apt-get full-upgrade (and update, of course) to fix it. I am writing this as a heads-up to all the other people who have the same problem, and as a sort of bug report.
Yes, now I can start the container.
And a member of the Proxmox staff confirms that adding this repository is the right thing to do:
If you don't have a subscription, you need to enable the no-subscription repository as described in the docs. Otherwise you don't get updates, and thus also no bug and security fixes.
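A minimal sketch of that fix as commands (the file name under sources.list.d is just a convenient choice, not mandated by anything):
echo "deb http://download.proxmox.com/debian/pve stretch pve-no-subscription" > /etc/apt/sources.list.d/pve-no-subscription.list
apt-get update
apt-get full-upgrade
pct start 400    # the container should now start without the AppArmor denial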
Friday, 28 September 2018
Listing the day's backups
root@hp350:/mnt/backup/dump# ls -lt|egrep 'Sep 28'|awk '{ print $9}'
vzdump-lxc-700-2018_09_28-03_15_20.log
vzdump-lxc-700-2018_09_28-03_15_20.tar.lzo
vzdump-qemu-515-2018_09_28-03_12_21.log
vzdump-qemu-515-2018_09_28-03_12_21.vma.lzo
vzdump-qemu-510-2018_09_28-03_11_35.log
vzdump-qemu-510-2018_09_28-03_11_35.vma.lzo
vzdump-qemu-504-2018_09_28-03_10_17.log
vzdump-qemu-504-2018_09_28-03_10_17.vma.lzo
vzdump-qemu-503-2018_09_28-03_09_34.log
vzdump-qemu-503-2018_09_28-03_09_34.vma.lzo
vzdump-qemu-502-2018_09_28-03_07_29.log
vzdump-qemu-502-2018_09_28-03_07_29.vma.lzo
vzdump-lxc-405-2018_09_28-03_03_06.log
vzdump-lxc-405-2018_09_28-03_03_06.tar.lzo
vzdump-lxc-404-2018_09_28-02_51_44.log
vzdump-lxc-404-2018_09_28-02_51_44.tar.lzo
vzdump-lxc-120-2018_09_28-02_50_44.log
vzdump-lxc-120-2018_09_28-02_50_44.tar.lzo
vzdump-lxc-118-2018_09_28-02_49_33.log
vzdump-lxc-118-2018_09_28-02_49_33.tar.lzo
vzdump-lxc-116-2018_09_28-02_48_27.log
vzdump-lxc-116-2018_09_28-02_48_27.tar.lzo
vzdump-lxc-108-2018_09_28-02_38_40.log
vzdump-lxc-108-2018_09_28-02_38_40.tar.lzo
vzdump-lxc-106-2018_09_28-02_36_29.log
vzdump-lxc-106-2018_09_28-02_36_29.tar.lzo
vzdump-lxc-104-2018_09_28-02_31_23.log
vzdump-lxc-104-2018_09_28-02_31_23.tar.lzo
vzdump-lxc-101-2018_09_28-02_30_02.log
vzdump-lxc-101-2018_09_28-02_30_02.tar.lzo
root@hp350:/mnt/backup/dump# ls -lt|egrep 'Sep 28'|awk '{ print $9}'|grep tar.lzo
vzdump-lxc-700-2018_09_28-03_15_20.tar.lzo
vzdump-lxc-405-2018_09_28-03_03_06.tar.lzo
vzdump-lxc-404-2018_09_28-02_51_44.tar.lzo
vzdump-lxc-120-2018_09_28-02_50_44.tar.lzo
vzdump-lxc-118-2018_09_28-02_49_33.tar.lzo
vzdump-lxc-116-2018_09_28-02_48_27.tar.lzo
vzdump-lxc-108-2018_09_28-02_38_40.tar.lzo
vzdump-lxc-106-2018_09_28-02_36_29.tar.lzo
vzdump-lxc-104-2018_09_28-02_31_23.tar.lzo
vzdump-lxc-101-2018_09_28-02_30_02.tar.lzo
Until I polish it, the copy command is:
rsync vzdump*2018_10_05* -A ies@192.168. :/mnt/usb
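A slightly more finished sketch of that copy, assuming vzdump's usual YYYY_MM_DD naming and with the destination host left as a placeholder:
DAY=$(date +%Y_%m_%d)                                   # e.g. 2018_09_28
rsync -av /mnt/backup/dump/vzdump-lxc-*-"$DAY"-*.tar.lzo ies@DEST_HOST:/mnt/usb/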
Thursday, 27 September 2018
WARNING: Sum of all thin volume sizes (932.00 GiB) exceeds the size of thin pool pve/data and the size of whole volume group (931.26 GiB)!
While migrating machines between two Proxmox hosts, I ran into this warning during one of the restores.
WARNING: Sum of all thin volume sizes (932.00 GiB) exceeds the size of thin pool pve/data and the size of whole volume group (931.26 GiB)!
I had been happily moving machines across, and now I find I've run out of space?
The sum of the allocated sizes comes out above the limit I actually have.
root@ies:~# lvscan
ACTIVE '/dev/pve/swap' [8.00 GiB] inherit
ACTIVE '/dev/pve/root' [96.00 GiB] inherit
ACTIVE '/dev/pve/data' [811.26 GiB] inherit
ACTIVE '/dev/pve/vm-330-disk-1' [64.00 GiB] inherit
ACTIVE '/dev/pve/vm-330-disk-2' [64.00 GiB] inherit
ACTIVE '/dev/pve/vm-403-disk-1' [100.00 GiB] inherit
ACTIVE '/dev/pve/vm-402-disk-1' [108.00 GiB] inherit
ACTIVE '/dev/pve/vm-108-disk-1' [21.00 GiB] inherit
ACTIVE '/dev/pve/vm-118-disk-1' [4.00 GiB] inherit
ACTIVE '/dev/pve/vm-116-disk-1' [4.00 GiB] inherit
ACTIVE '/dev/pve/vm-120-disk-1' [16.00 GiB] inherit
ACTIVE '/dev/pve/vm-104-disk-1' [20.00 GiB] inherit
ACTIVE '/dev/pve/vm-101-disk-1' [13.00 GiB] inherit
ACTIVE '/dev/pve/vm-106-disk-1' [4.00 GiB] inherit
ACTIVE '/dev/pve/vm-400-disk-1' [208.00 GiB] inherit
ACTIVE '/dev/pve/vm-206-disk-1' [202.00 GiB] inherit
ACTIVE '/dev/pve/vm-510-disk-1' [8.00 GiB] inherit
ACTIVE '/dev/pve/vm-520-disk-1' [32.00 GiB] inherit
ACTIVE '/dev/pve/vm-502-disk-1' [32.00 GiB] inherit
ACTIVE '/dev/pve/vm-211-disk-1' [32.00 GiB] inherit
Adding up all the allocated sizes gives:
64 + 64 + 100 + 108 + 21 + 4 + 4 + 16 + 20 + 13 + 4 + 208 + 202 + 8 + 32 + 32 + 32 = 932 GiB
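A quicker way to compare what is allocated against what the thin volumes actually use (a sketch with standard LVM tooling, nothing Proxmox-specific):
lvs -o lv_name,lv_size,data_percent pve    # Data% shows the real usage of each thin volume and of the pool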
Let's look at machine 400:
root@ies:~# pct enter 400
root@moodle:~# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/pve-vm--400--disk--1 204G 144G 50G 75% /
where I can see that 50 GB are free. I'll check whether I can shrink the disk size from Proxmox.
Searching Google I find someone in the same situation:
I tried with "pct resize 106 rootfs 80gb" (from 200gb), but I get: "unable to shrink disk size"
In fact, the Proxmox web UI rejects negative values, so the disk can't be shrunk there either.
IN THE END I GO FOR THE SLOWER OPTION: TAKE A BACKUP AND RESTORE IT WITH A SMALLER SIZE. These images only need to stay powered on and will not grow any further, since they are kept for consultation only.
You probably figured it out already, but that seems to be a limitation of LXC containers, at least for now. From the pct man page:
\+?\d+(\.\d+)?[KMGT]?
The new size. With the + sign the value is added to the actual size of the volume and without it,
the value is taken as an absolute one. Shrinking disk size is not supported.
If you truly want to shrink a container, I guess you'd have to perform a backup, then restore it with the --rootfs local: option, like so:
pct stop
vzdump -storage local -compress lzo
pct destroy
pct restore /var/lib/lxc/vzdump-lxc--....tar.lzo --rootfs local:
This has worked so far, letting me shrink and recover at least 100 GB from 4 machines.
Of course, you can't perform this sort of resizing online, so I wouldn't call it a great solution, but it works if you have no other choice. Good luck,
https://serverfault.com/questions/784890/how-to-shrink-the-disk-of-a-lxc-container-on-proxmox-4
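Spelled out for one container, the sequence looks roughly like this; CT 400 and the 160 GB target are only an illustration (the new size must still be larger than the data actually used), and the dump filename is whatever vzdump printed:
pct stop 400
vzdump 400 -storage local -compress lzo           # writes /var/lib/vz/dump/vzdump-lxc-400-<timestamp>.tar.lzo
pct destroy 400
pct restore 400 /var/lib/vz/dump/vzdump-lxc-400-<timestamp>.tar.lzo --rootfs local-lvm:160
pct start 400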