Monday 26 October 2015

Getting a DESKTOP on CentOS!

Do a minimal install of CentOS 6.5, then run the following:

# yum -y groupinstall "Desktop" "Desktop Platform" "X Window System" "Fonts" "General Purpose Desktop"

# init 5

To have CentOS boot into runlevel 5 ("X11") instead of runlevel 3 ("Full multiuser mode"), change the default runlevel in /etc/inittab like this:

# nano /etc/inittab

id:3:initdefault:

to

id:5:initdefault:
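If you prefer a one-liner over editing the file by hand, a sed command along these lines should do the same thing (a sketch, assuming a stock CentOS 6 inittab):

# sed -i 's/^id:3:initdefault:/id:5:initdefault:/' /etc/inittab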

Job Done!

Friday 2 October 2015

AWS Auto Scaling - Complete CLI Solution

#! /bin/bash

function ASG {
read -e -p "Enter Min no. of instances(Integer Value)...: " min
echo ""
read -e -p "Enter Max no. of instances(Integer Value)...: " max
echo ""
read -e -p "Enter Desired no. of instances(Integer Value)...: " desired
echo ""
aws autoscaling create-auto-scaling-group --auto-scaling-group-name $Cluster-$COMPONENT-ASG --launch-configuration-name $Cluster-$COMPONENT-ASG --min-size $min --max-size $max --desired-capacity $desired --availability-zones us-east-1e --health-check-type ELB --health-check-grace-period 240 --default-cooldown 300 --load-balancer-names "RTB" --vpc-zone-identifier subnet-fea1f0d5 ; check;
}

function config {
echo ""
aws autoscaling create-launch-configuration --launch-configuration-name $Cluster-$COMPONENT-ASG --image-id ami-b7f68bd2 --instance-type $COMPONENT --spot-price "0.27" --no-associate-public-ip-address  --security-groups sg-665df302 --user-data $File --key-name rtbservers --instance-monitoring Enabled=false --no-ebs-optimized --block-device-mappings "[{\"DeviceName\":\"/dev/xvda\",\"Ebs\":{\"SnapshotId\":\"snap-68dc2c09\",\"VolumeSize\":10,\"VolumeType\": \"gp2\",\"DeleteOnTermination\": true}},{\"DeviceName\": \"/dev/sdb\",\"VirtualName\":\"ephemeral0\"}]" ; check && ASG;
}

function check {
if [ $? -eq 0 ]
then
echo "Command Executed Successfully"
echo ""
else
echo "Error, Please try again!"
echo ""
exit 1
fi
}

function type {
echo ""
echo "Please select the desired Instance type"
echo ""

select COMPONENT in c3.2xlarge c3.4xlarge m3.2xlarge r3.2xlarge r3.4xlarge g2.2xlarge
do
case $COMPONENT in

c3.2xlarge) config;
            break;;
c3.4xlarge) config;
            break;;
m3.2xlarge) config;
            break;;
r3.2xlarge) config;
            break;;
r3.4xlarge) config;
            break;;
g2.2xlarge) config;
            break;;
esac
done
}

echo ""
echo "Please select the desired Cluster type"
echo ""

select Cluster in RTB RTB3 RTB4
do
case $Cluster in
RTB) File="file:///operations/users/naveed/bootscriptrtb.txt"
     type;
     break;;
RTB3) File="file:///operations/users/naveed/bootscriptrtb3.txt"
      type;
      break;;
RTB4) File="file:///operations/users/naveed/bootscriptrtb4.txt"
      type;
      break;;
esac
done

echo "Please configure AS Policy"
echo ""
read -e -p "Enter No. of Instance to be affected (Integer Value)...: " aff
echo ""

aws autoscaling put-scaling-policy --policy-name increase-policy --auto-scaling-group-name $Cluster-$COMPONENT-ASG --scaling-adjustment $aff --adjustment-type ChangeInCapacity --output text > increase.txt
inc=$(cat increase.txt)
aws autoscaling put-scaling-policy --policy-name decrease-policy --auto-scaling-group-name $Cluster-$COMPONENT-ASG --scaling-adjustment -$aff --adjustment-type ChangeInCapacity --output text > decrease.txt
dec=$(cat decrease.txt)

echo "Please configure AS Alarm"
echo ""
read -e -p "Enter High CPU threshold (Integer Value)...: " high
echo ""
read -e -p "Enter Lower CPU threshold (Integer Value)...: " low
echo ""

aws cloudwatch put-metric-alarm --alarm-name $Cluster-$COMPONENT-AddCapacity --metric-name $Cluster-$COMPONENT-CPU-HIGH --namespace AWS/EC2 --statistic Average --period 300 --threshold $high --comparison-operator GreaterThanOrEqualToThreshold --dimensions "Name=AutoScalingGroupName,Value=$Cluster-$COMPONENT-ASG" --evaluation-periods 2 --alarm-actions $inc
aws cloudwatch put-metric-alarm --alarm-name $Cluster-$COMPONENT-RemoveCapacity --metric-name $Cluster-$COMPONENT-CPU-LOW --namespace AWS/EC2 --statistic Average --period 300 --threshold $low --comparison-operator LessThanOrEqualToThreshold --dimensions "Name=AutoScalingGroupName,Value=$Cluster-$COMPONENT-ASG" --evaluation-periods 2 --alarm-actions $dec

rm -fr increase.txt decrease.txt

echo "Job Finished!"
echo ""

Friday 25 September 2015

Fixing the GPG missing keys error

Sometimes we face a missing GPG key issue when we add different sources to the sources.list file; this can be fixed easily.
I'm going to show an example where I'm using Ubuntu 14 and first add the sources for Debian Jessie (8).

First, go to http://debgen.simplylinux.ch/

Get your sources list, for example:

deb http://ftp.us.debian.org/debian testing main contrib non-free
deb-src http://ftp.us.debian.org/debian testing main contrib non-free

deb http://security.debian.org/ jessie/updates main contrib non-free
deb-src http://security.debian.org/ jessie/updates main contrib non-free


Paste this into:

nano /etc/apt/sources.list

Then run this script:

#!/bin/bash

apt-get update 2> /tmp/keymissing; for key in $(grep "NO_PUBKEY" /tmp/keymissing |sed "s/.*NO_PUBKEY //"); do echo -e "\nProcessing key: $key"; gpg --keyserver pgpkeys.mit.edu --recv $key && gpg --export --armor $key | apt-key add -; done
apt-get update 2> /tmp/keymissing; for key in $(grep "NO_PUBKEY" /tmp/keymissing |sed "s/.*NO_PUBKEY //"); do echo -e "\nProcessing key: $key"; gpg --keyserver subkeys.pgp.net --recv $key && gpg --export --armor $key | apt-key add -; done

# it works if the command finished and says things like:
#  gpg: requesting key 46925553 from hkp server pgpkeys.mit.edu
#  gpg: key 46925553: "Debian Archive Automatic Signing Key (7.0/wheezy)
#  <ftpmaster@debian.org>" not changed
#  gpg: Total number processed: 1
#  gpg:              unchanged: 1

# If it doesn't work and gets stuck on
# gpg: requesting key 46925553 from hkp server pgpkeys.mit.edu

# Then copy the key from this line:
# "Processing key: 8B48AD6246925553"
# and do a web search for "8B48AD6246925553"; this will turn up a keyserver that has it,
# which you can then plug into the --keyserver option above.

# Keyservers that worked for me:
# Works for Debian: pgpkeys.mit.edu
# Works for Ubuntu: subkeys.pgp.net
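If only a single key is missing, it can also be fetched by hand; this example just runs the same commands the script uses, with the key ID from the output above:

gpg --keyserver pgpkeys.mit.edu --recv-keys 8B48AD6246925553
gpg --export --armor 8B48AD6246925553 | apt-key add -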


This will work for both Ubuntu and Debian, in either direction.

Special thanks to: http://ram.kossboss.com/fix-missing-gpg-key-apt-get/

Monday 7 September 2015

Init Script Alternative

Install the upstart package from the repo; it is available for both Debian and CentOS.

To create a job that starts automatically when Ubuntu boots, create the following file, /etc/init/testservice.conf, with sudo:

# testservice - test service job file

description "my service description"
author "Me <myself@i.com>"

# Stanzas
#
# Stanzas control when and how a process is started and stopped
# See a list of stanzas here: http://upstart.ubuntu.com/wiki/Stanzas

# When to start the service
start on runlevel [2345]

# When to stop the service
stop on runlevel [016]

# Automatically restart process if crashed
respawn

# Essentially lets upstart know the process will detach itself to the background
# This option does not seem to be of great importance, so it does not need to be set.
#expect fork

# Specify working directory
chdir /home/user/testcode

# Specify the process/command to start, e.g.
exec python mycommand.py arg1 arg2

To manually start or stop the process, use:

sudo start testservice
sudo stop testservice
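To check whether the job is currently running, upstart's status tools can be used (a quick optional check):

sudo status testservice
sudo initctl list | grep testservice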

The above example works for most cases, but there is a shortcut too:

The trick is to use a shutdown hook for the system. In Ubuntu it is very easy to add a shutdown hook using upstart.

The upstart job config file (/etc/init/shutdown-hook.conf) should look something like this:

description "run at shutdown"

start on starting rc
task
exec /bin/bash /etc/my_service/upload_log_to_s3.sh log

Finally I just needed the script (/etc/my_service/upload_log_to_s3.sh) for uploading a log file to S3:

#!/bin/bash

log_file_ext=$1

gzip -c /var/log/my_service/*.$log_file_ext > /tmp/log.gz

aws s3 cp /tmp/log.gz s3://my_service_bucket/logs/`date +%Y-%m-%dT%H:%M:%SZ`.log.gz

The upload script will just gzip the log file (needed as I’m using delaycompress), rename the log file to the current timestamp, and upload the file using aws-cli. The argument sets the file extension of the log file, which is necessary to be able to upload both the current (.log) as well as the previous log file (.log.1).

Sunday 6 September 2015

POST HTTP API Grafana Query



The details are as follows:


URL : http://www.nasheikh.com:3000/api/datasources?=
Method : POST
Headers :  Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
Payload : 

{
        "name":"test3",
        "type":"influxdb_08",
        "url":"http://localhost:8086",
        "access":"proxy",
        "basicAuth":false,
        "database":"server1",
        "user":"root",
        "password":"root"
}


Or do this as a one-liner with curl:

curl -H "Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk" -H "Accept: application/json" -H "Content-type: application/json" -X POST -d @datainject.json http://www.nasheikh.com:3000/api/datasources?=

Here datainject.json is the payload shown above, saved as a JSON file and passed to curl.
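To confirm the data source was actually created, a plain GET against the same endpoint lists the existing data sources (same bearer token as above, shown here as a placeholder):

curl -H "Authorization: Bearer <your-api-key>" http://www.nasheikh.com:3000/api/datasources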

Watch a file for changes to its content, and perform an action accordingly:


Parent Script

#!/bin/bash

while true; do
  change=$(inotifywait -e close_write,moved_to,create .)
  change=${change#./ * }
  if [ "$change" = "changingfile.txt" ]; then ./child.sh; fi
done

Child Script

#!/bin/bash

rm -fr middle*
count=0
eraser=0
while read server
do
let count=$count+1
sed 's/server1/'${server}'/g;s/Tracker/'${server}'/g' center > middle.$count
sed 's/test1/'${server}'/g' datainject.json > newdata.$count.json
curl -H "Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk" -H "Accept: application/json" -H "Content-type: application/json" -X POST -d @newdata.$count.json http://www.nasheikh.com:3000/api/datasources?=
sleep 2
done < network.txt
eraser=$(echo middle.$count)
sed -i '$ s/.$//' $eraser
sleep 2
cat prepend middle* final > merged.json
curl -H "Authorization: Bearer eyJrIjoiNzNjc2s5amxkZW9aUE81UXV3dVVOcmRHNTJhRkVOY1oiLCJuIjoiYWRtaW4iLCJpZCI6MX0=" -H "Accept: application/json" -H "Content-type: application/json" -X POST -d @merged.json http://nasheikh.com:3000/api/dashboards/db
rm -fr middle* merged.json newdata*

========================================================================

Saturday 5 September 2015

Update Server List from Amazon VPC

#!/bin/bash

aws ec2 describe-instances --query 'Reservations[*].Instances[*].[PublicIpAddress]' --output text --profile nash >> mainusa.txt

while true

do

aws ec2 describe-instances --query 'Reservations[*].Instances[*].[PublicIpAddress]' --output text --profile nash >> freshusa.txt

diff -Naur mainusa.txt freshusa.txt > my.patch

check=$(cat my.patch | wc -l)

echo $check

if [ "$check" -gt 0 ]

then

patch mainusa.txt < my.patch

else

echo "no chnage"

fi

rm -fr freshusa.txt my.patch

sleep 30

done

Wednesday 19 August 2015

Random Stuff 2

Calculate Response time of web page

#!/bin/sh

while true
do
script -c "time -p wget http://52.20.204.148/ -q --output-document=/dev/null 2>/tmp/tmp.log" -f /tmp/tmp.log > /dev/null
sleep 5
NUM=$(cat /tmp/tmp.log | grep "real" | cut -d " " -f2 | tr -cd '[:digit:].')
curl -X POST -d '[{"name":"foo","columns":["val"],"points":[['$NUM']]}]' "http://52.20.204.148:8086/db/server1/series?u=root&p=root"
done

========================================================================

Type of web server response per minute

#!/bin/bash

while true
do
#set -x
mkdir -p /home/ec2-user/tmp/percfile
rm -f /home/ec2-user/tmp/percfile/*
dd=`date +%d`
mon=`date +%m`
yea=`date +%Y`
for (( i = 1; i >0; i-- ))
do
grep $(date +%d/%b/%Y:%H:%M -d "-$i  min") /var/log/httpd/access_log >> /home/ec2-user/tmp/percfile/log1mins
GOOD=$(grep "200" /home/ec2-user/tmp/percfile/log1mins | wc -l)
BAD=$(grep -v "200" /home/ec2-user/tmp/percfile/log1mins | wc -l)
curl -X POST -d '[{"name":"response","columns":["good","bad"],"points":[['$GOOD','$BAD']]}]' "http://localhost:8086/db/server1/series?u=root&p=root"
done
sleep 1m
done

========================================================================

No of hits to a web server per second

#!/bin/sh
frequency=1
lastCount=`wc -l /var/log/httpd/access_log | sed 's/\([0-9]*\).*/\1/'`
while true
do
newCount=`wc -l /var/log/httpd/access_log | sed 's/\([0-9]*\).*/\1/'`
diff=$(( newCount - lastCount ))
rate=$(echo "$diff / $frequency" |bc -l)
#echo $rate
curl -X POST -d '[{"name":"hps","columns":["val"],"points":[['$rate']]}]' "http://localhost:8086/db/server1/series?u=root&p=root"
lastCount=$newCount
sleep $frequency
done

========================================================================

Gets your public IP

#!/bin/bash
while true
do
sleep 1
myip=$(curl wgetip.com 2> /dev/null) > /dev/null
sleep 1
curl -X POST -d '[{"name":"response","columns":["val1"],"points":[["'$myip'"]]}]' "http://$myip:8086/db/server1/series?u=root&p=root"
sleep 1
done

========================================================================

Service status in boolean

#!/bin/bash
while true
do
sleep 1
pidof  httpd >/dev/null
if [[ $? -eq 0 ]] ; then
curl -X POST -d '[{"name":"response","columns":["val2"],"points":[[0]]}]' "http://localhost:8086/db/server1/series?u=root&p=root"
else
curl -X POST -d '[{"name":"response","columns":["val2"],"points":[[1]]}]' "http://localhost:8086/db/server1/series?u=root&p=root"
sleep 1
fi
done

========================================================================

Type of web server response per minute

#!/bin/sh

#while true
#do
NOW=$( tail -n1 /var/log/httpd/access_log | head -1 | awk -F'[][]' '{ gsub(/\//," ",$2); sub(/:/," ",$2); "date +%s -d \""$2"\""|getline d; print d;}' )
REF=$(( $NOW - 60 ))
NUM=$REF
LINE=1
COUNT=0
BAD=0
while [ $NUM -ge $REF ]
do
NUM=$( tail -n$LINE /var/log/httpd/access_log | head -1 | awk -F'[][]' '{ gsub(/\//," ",$2); sub(/:/," ",$2); "date +%s -d \""$2"\""|getline d; print d;}' )
tail -n$LINE /var/log/httpd/access_log | head -1 | grep -i "200" >/dev/null && let COUNT=$COUNT+1
tail -n$LINE /var/log/httpd/access_log | head -1 | grep -v "200" >/dev/null && let BAD=$BAD+1
let LINE=$LINE+1
done
let COUNT=$COUNT-1
let BAD=BAD-1
curl -X POST -d '[{"name":"response","columns":["good","bad"],"points":[['$COUNT','$BAD']]}]' "http://localhost:8086/db/server1/series?u=root&p=root"
#sleep 15m
#done

========================================================================

SQL access through PHP

<?php
$servername = "localhost";
$username = "root";
$password = "qwe";
$dbname = "grafana";
$homepage = file_get_contents("/home/comb.txt");

// Create connection
$conn = mysqli_connect($servername, $username, $password, $dbname);
// Check connection
if (!$conn) {
    die("Connection failed: " . mysqli_connect_error());
}

$sql = "UPDATE dashboard SET data='$homepage' WHERE slug='dash2'";

if (mysqli_query($conn, $sql)) {
    echo "Record updated successfully";
} else {
    echo "Error updating record: " . mysqli_error($conn);
}

mysqli_close($conn);
?>

========================================================================


Run local script on remote server (with root privileges)

Parent Script :

#!/bin/bash
SOURCEFILE=$1
SOURCEIP=$2
TARGETIP=$3
FILE=$(basename $1)
echo "Enter the path"
read path
scp -3 -r -i /root/asw/Admaxim-23-06-2015.pem ec2-user@$SOURCEIP:$SOURCEFILE ec2-user@$TARGETIP:/tmp    
ssh -t -t -i /root/asw/Admaxim-23-06-2015.pem ec2-user@$TARGETIP sudo -i "bash -s" -- < ./remold.sh "$FILE" "$path"
exit


Child Script :

#!/bin/bash
cd /tmp
yes | cp -r $1 $2
rm -fr $1
exit
exit

========================================================================

SSH into a remote server with a non-root account using a login key, and become root automatically:


#!/bin/bash
ssh -t -t -i /root/asw/keyfile.pem user@$1 sudo -i "bash -s"

========================================================================

Pass a defined column value to a loop:


#!/bin/bash

while read server
do
echo ${server}
done < final | awk '{print $1}'

========================================================================

DB creation :


curl -X POST "http://localhost:8086/db?u=root&p=root" -d '{"name": "'${server}'"}'

========================================================================

Random Stuff

This is the bash script

#!/bin/bash

NOW=$( tail -n1 log1 | awk -F'[][]' '{ gsub(/\//," ",$2); sub(/:/," ",$2); "date +%s -d \""$2"\""|getline d; print d;}' ) ## get last line timestamp
REF=$(( $NOW - 15*60 ))  ##previous 15 minute
NUM=$REF
LINE=1
COUNT=0
while [ $NUM -ge $REF ] ## Run until the condition is not met
do
NUM=$( tail -n$LINE log1 | head -1 | awk -F'[][]' '{ gsub(/\//," ",$2); sub(/:/," ",$2); "date +%s -d \""$2"\""|getline d; print d;}' ) ## pass value of each time stamp and compare with the while condition
tail -n$LINE log1 | head -1 | grep -i "ready" && let COUNT=$COUNT+1  ## count the number of ready occurrence
let LINE=$LINE+1 ## for backward iteration
done
echo "Total Error = "$COUNT" " ## display count

The log file used is "log1"

10.183.253.51 - - [18/Mar/2013:22:25:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:26:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:27:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:28:00 -0700] GET /adserver/ready HTTP/1.1 200 1 0
10.183.253.51 - - [18/Mar/2013:22:29:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:30:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:31:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:32:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:33:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:34:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:35:00 -0700] GET /adserver/ready HTTP/1.1 200 1 0
10.183.253.51 - - [18/Mar/2013:22:36:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:37:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:38:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:39:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:40:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:41:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:42:00 -0700] GET /adserver/ready HTTP/1.1 200 1 0
10.183.253.51 - - [18/Mar/2013:22:43:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:44:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:45:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:46:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:47:00 -0700] GET /adserver/ready HTTP/1.1 200 1 0
10.183.253.51 - - [18/Mar/2013:22:48:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:49:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:50:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:51:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:52:00 -0700] GET /adserver/ready HTTP/1.1 200 1 0
10.183.253.51 - - [18/Mar/2013:22:53:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:54:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1
10.183.253.51 - - [18/Mar/2013:22:55:00 -0700] POST /adserver/getOpenRTBSmaatoInfo HTTP/1.1 204 - 1

x-----------------------------------------------------------------------------------------------------------------------x

This is the bash script

#!/bin/bash

NOW=$( tail -n1 log2 | awk -F'[][]' '{ gsub(/\//," ",$2); sub(/:/," ",$2); "date +%s -d \""$2"\""|getline d; print d;}' ) ## take first time instance ( user specified can also be parsed )
REF=$(( $NOW - 15*60 ))  ##previous 15 minute PN. second time instance ( user specified can also be parsed )
NUM=$REF
LINE=1
while [ $NUM -ge $REF ]
do
NUM=$( tail -n$LINE log2 | head -1 | awk -F'[][]' '{ gsub(/\//," ",$2); sub(/:/," ",$2); "date +%s -d \""$2"\""|getline d; print d;}' )
tail -n$LINE log2 | head -1 | grep -v "click" | cut -d "=" -f4 | cut -d " " -f1 >> number.txt    ## Values retrieved from the log are temporarily stored here
SUM=$(awk '{ SUM += $1} END { printf "%.2f",  SUM }' number.txt) ## values are retrieved and added up gradually, step by step
let LINE=$LINE+1 ## backward iteration for faster processing
done
rm -fr number.txt ## external temporary file created has been removed
echo "The Sum of Bit is "$SUM"" ##  print the sum

The log file used is "log2"

10.183.252.22 - - [16/Mar/2013:00:00:06 -0700] GET /adtracker/track/track/imp?id=11062&cpnsite_id=23709&bid=0.3 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:01:06 -0700] GET /adtracker/track/track/imp?id=11390&cpnsite_id=23609&bid=0.2 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:02:06 -0700] GET /adtracker/track/track/imp?id=11390&cpnsite_id=23609&bid=0.2 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:03:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:04:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:05:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:06:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:07:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:08:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:09:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:10:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:11:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:12:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:13:06 -0700] GET /adtracker/track/track/imp?id=11390&cpnsite_id=23609&bid=0.2 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:14:06 -0700] GET /adtracker/track/track/imp?id=11390&cpnsite_id=23609&bid=0.2 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:15:06 -0700] GET /adtracker/track/track/imp?id=11390&cpnsite_id=23609&bid=0.2 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:16:06 -0700] GET /adtracker/track/track/imp?id=11390&cpnsite_id=23609&bid=0.2 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:17:06 -0700] GET /adtracker/track/track/imp?id=11390&cpnsite_id=23609&bid=0.2 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:18:06 -0700] GET /adtracker/track/track/imp?id=11390&cpnsite_id=23609&bid=0.2 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:19:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:20:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:21:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:22:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:23:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:24:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:26:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:27:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:28:06 -0700] GET /adtracker/track/track/imp?id=11390&cpnsite_id=23609&bid=0.2 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:29:06 -0700] GET /adtracker/track/track/imp?id=11390&cpnsite_id=23609&bid=0.2 HTTP/1.1 3
10.183.252.22 - - [16/Mar/2013:00:30:06 -0700] GET /adtracker/track/track/click?id=11062 HTTP/1.1 3

x-----------------------------------------------------------------------------------------------------------------------x

Just enter the below command at the shell

# cat log2 | grep -v "click" | cut -d "?" -f2 | cut -d "&" -f1

The log file used is "log2"

Already given above.

x-----------------------------------------------------------------------------------------------------------------------x

To increase the file descriptor limit, edit the kernel parameter file /etc/sysctl.conf.

# nano /etc/sysctl.conf

fs.file-max = 600000

Apply the changes without a reboot:

# sysctl -p

To check the new limit :

# more /proc/sys/fs/file-max
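Current file-handle usage can be inspected too; the first number is the count of allocated handles and the last is the limit (just an optional check):

# cat /proc/sys/fs/file-nr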

x-----------------------------------------------------------------------------------------------------------------------x

Sync Time between servers

Option 1

# date --set="$(ssh user@server date)"

Or

# ssh user@host "date --set \"$(date)\""

manually on all three servers

Option 2

‘ntpdate 192.168.1.1’ to be done on each server

or add

add the line "server 192.168.1.1 prefer" to /etc/ntp.conf on each server

although we will require a self hosted NTP server.

Option 3

Set all three servers to the same timezone and use ntpdate pool.ntp.org to adjust them to the same time.
The offset between the servers can be checked by running ntpq -p after adding each server's IP to the others' peer lists.
The offset is usually in milliseconds, and mostly down to network latency.
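If running a full ntpd on every box feels like overkill, a periodic ntpdate from cron is a common alternative (my own suggestion, not part of the options above):

# crontab entry (illustrative): resync against the pool every hour
0 * * * * /usr/sbin/ntpdate -u pool.ntp.org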

x-----------------------------------------------------------------------------------------------------------------------x

Change Timezone of a machine

# cp /etc/localtime /root/old.timezone
# rm /etc/localtime
# ln -s /usr/share/zoneinfo/America/New_York /etc/localtime
# nano /etc/sysconfig/clock  /* to make the change permanent */

ZONE="America/New_York"  /* change value according to your need */

Reboot the machine and then:

# date

or

# ntpdate pool.ntp.org     /* NTP service should be installed and running */

x-----------------------------------------------------------------------------------------------------------------------x

We are assuming here that all 5 servers have already been added to known_hosts, the id_rsa.pub file has been transferred from the sender to each receiving server and added to ~/.ssh/authorized_keys, and root login is allowed on all servers.

This is the bash script

#!/usr/bin/bash

# There are three variables accepted via commandline
# $1 = first parameter (/source_path/source_filename)
# $2 = second parameter (/target_directory/)
# $3 = third parameter (file that contains list of hosts)

SOURCEFILE=$1
TARGETDIR=$2
HOSTFILE=$3

if [ -f $SOURCEFILE ] ## check for file existence
then
   printf "File found, preparing to transfer\n"
   while read server  ## loop over each host in the list
   do
      scp -vp $SOURCEFILE ${server}:$TARGETDIR  ## do transfer accordingly
   done < $HOSTFILE ## list of ip's which are stored in an external file
else
   printf "File \"$SOURCEFILE\" not found\n"
   exit 0
fi
exit 0

Command at prompt # ./scp.sh /file/to/be/transferred /destination/of/file/to/be/ /directory/of/ip/list/network.txt

The file used is "network.txt" for IP list

192.168.119.135
192.168.119.136
192.168.119.137
192.168.119.138
192.168.119.139

x-----------------------------------------------------------------------------------------------------------------------x

AWS Spot Pricing

This is done in two scripts:

./spothead.sh

#!/bin/bash

n=$(date "+%H:%M:%S")
while read server
do
#var=$(. ./spotprice.sh "${server}")
#echo "AFTER: $var"
./spotprice.sh "${server}"
done < types.txt
echo ""
printf "PN: -  TOP most value shows the SMOOTHEST graph  -\n    -   Rest of the values follows accordingly   -\n    -        LAST value represent WORST!         -"
echo ""
echo ""
echo "-------------------------------------------------------"
printf "|   Gausian Curve    |  Current Price  |    Server    | \n"
printf "| (top value = best) |  "%s\ at" "$n"   |    (Type)    | \n"
echo "-------------------------------------------------------"
cat merged.txt | sort -n
echo ""
rm -fr merged.txt

========================================================================


./spotprice.sh

#!/bin/bash

NOW=$(date "+%Y-%m-%dT%H:%M:%S")
REF=$(date -d "-1 days" +"%Y-%m-%dT%H:%M:%S")
aws ec2 describe-spot-price-history --instance-types $1 --availability-zone us-east-1e --product-description "Linux/UNIX (Amazon VPC)" --start-time $REF --end-time $NOW --query 'SpotPriceHistory[*].[SpotPrice]' --output text >> number.txt
SUM=$(awk '{delta = $1 - avg; avg += delta / NR; mean2 += delta * ($1 - avg); } END { printf "%0.7f \n" , sqrt(mean2 / NR); }' number.txt)
CP=$(tail -1 number.txt)
echo "      $SUM          $CP        $1" >> merged.txt
rm -fr number.txt
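spothead.sh reads the instance types it compares from types.txt, one type per line. The exact contents are whatever types you care about; as an illustration (borrowing the types used in the auto scaling script above), it could look like this:

c3.2xlarge
c3.4xlarge
m3.2xlarge
r3.2xlarge
r3.4xlarge
g2.2xlarge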


That will get the job done!!

Access Log Manipulation

#!/bin/bash

tail -n1 /var/log/httpd/access_log | awk '{print $4}' | while read line
do
in=${line}
#echo "Input: $in"
rfc_form="${in:1:2}-${in:4:3}-${in:8:4} UTC ${in:13:2}:${in:16:2}:${in:19:2}"
#echo "Converted: $rfc_form"
epoch_time=$(date -d "$rfc_form" +%s)
echo "Only Seconds: $epoch_time"
#date -d "$rfc_form"
date -d @$epoch_time
done

One more technique:

echo "18-06:25:34 " | awk -F: '{ print ($1*24*3600) + ($2*3600) + ($3*60) + $4 }'

========================================================================

Take this log file :

[
    [
        "52.4.30.109"
    ], 
    [
        "54.210.143.237"
    ], 
    [], 
    [], 
    [
        "54.210.164.230"
    ], 
    [], 
    [], 
    [
        "54.210.247.55"
    ], 
  
    [
        "54.210.162.121"
    ], 
    [
        "54.210.145.45"
    ], 
    [], 
    [
        "107.23.2.112"
    ], 
    [], 
    [], 
    [
        "52.7.146.177"
    ], 
    [
        "54.210.222.151"
    ], 
    [
        "54.210.170.90"
    ], 
    [], 
    [
        "54.210.163.107"
    ], 
    [
        "54.210.171.244"
    ], 
    [
        "52.7.33.54"
    ], 
    [
        "54.210.192.121"
    ], 
    []
]


Apply this :

cat new.txt | awk '{$1=$1}{ print }' | sed 's/[][]//g;s/,//g;s/"//g' | sed '/^$/d'

Expect Magic !!

Full Script:

#!/bin/bash

rm -fr new*
rm -fr fresh*

aws ec2 --region eu-central-1 describe-instances --query 'Reservations[*].Instances[*].PublicIpAddress' >> neweu.txt
aws ec2 --region us-east-1 describe-instances --query 'Reservations[*].Instances[*].PublicIpAddress' >> newusa.txt

cat neweu.txt | awk '{$1=$1}{ print }' | sed 's/[][]//g;s/,//g;s/"//g' | sed '/^$/d' >> fresheu.txt
cat newusa.txt | awk '{$1=$1}{ print }' | sed 's/[][]//g;s/,//g;s/"//g' | sed '/^$/d' >> freshusa.txt

counteu=$(cat fresheu.txt | wc -l)
countusa=$(cat freshusa.txt | wc -l)

echo "EU count is $counteu"
echo "USA count is $countusa"

rm -fr new*
rm -fr fresh*

OR do this simply:

aws ec2 --region eu-central-1 describe-instances --query 'Reservations[*].Instances[*].[PublicIpAddress, Tags[0].Value]' --output text | sort -k 2

aws ec2 --region us-west-1 describe-instances --query 'Reservations[*].Instances[*].[PublicIpAddress, Tags[0].Value]' --output text | sort -k 2
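If jq is installed (it is also used in the AWS CLI section below), the same cleanup of the nested JSON arrays can be done without awk/sed; this is an alternative sketch, not what the script above does:

aws ec2 --region us-east-1 describe-instances --query 'Reservations[*].Instances[*].PublicIpAddress' | jq -r '.[][]'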

Thursday 13 August 2015

AWS Cli Commands

Create Instance:


aws ec2 run-instances --image-id ami-1ecae776 --count 1 --instance-type t2.micro --key-name XXXXX --security-group-ids sg-xxxxxxx --subnet-id subnet-xxxxxx --query 'Instances[0].InstanceId'

Name Instance:


aws ec2 create-tags --resources i-ffc16754 --tags Key=Name,Value=Test-Naveed


Delete Instance:


aws ec2 terminate-instances --instance-ids i-b1c5e919

All in One!


yum install jq

aws ec2 create-tags --resources `aws ec2 run-instances --image-id ami-3d50120d --instance-type t2.small --subnet-id subnet-xxxxxxx --security-group-ids sg-xxxxxxxx --key-name "MyKey" | jq -r ".Instances[0].InstanceId"` --tags "Key=Name,Value=development_webserver"

Thursday 30 July 2015

Web Server Statistic Calculation

#!/bin/sh

usage="Usage: `basename $0` -f <frequency in seconds, min 1, default 60> -l <log file>"

# Set up options
while getopts ":l:f:" options; do
 case $options in
 l ) logFile=$OPTARG;;
 f ) frequency=$OPTARG;;
 \? ) echo -e $usage
  exit 1;;
 * ) echo -e $usage
  exit 1;;

 esac
done

# Test for logFile
if [  ! -n "$logFile" ]
then
 echo -e $usage
 exit 1
fi

# Test for frequency
if [  ! -n "$frequency" ]
then
 frequency=60
fi

# Test that frequency is an integer
if [  $frequency -eq $frequency 2> /dev/null ]
then
 :
else
 echo -e $usage
 exit 3
fi

# Test that frequency is at least 1
if [  $frequency -lt 1 ]
then
 echo -e $usage
 exit 3
fi

if [ ! -e "$logFile" ]
then
 echo "$logFile does not exist."
 echo
 echo -e $usage
 exit 2
fi

NOW=$(tail -n1 $logFile | awk -F'[][]' '{ gsub(/\//," ",$2); sub(/:/," ",$2); "date +%s -d \""$2"\""|getline d; print d;}')
REF=$(($NOW - 1*60))
NUM=$REF
LINE=1
COUNT=0
BAD=0
lastCount=`wc -l $logFile | sed 's/\([0-9]*\).*/\1/'`
while true
do
while [ $NUM -ge $REF ]
do
 NUM=$( tail -n$LINE $logFile | head -1 | awk -F'[][]' '{ gsub(/\//," ",$2); sub(/:/," ",$2); "date +%s -d \""$2"\""|getline d; print d;}' )
 tail -n$LINE $logFile | head -1 | grep -i "200" >/dev/null && let COUNT=$COUNT+1
 tail -n$LINE $logFile | head -1 | grep -v "200" >/dev/null && let BAD=$BAD+1
 let LINE=$LINE+1
done
 let COUNT=$COUNT-1
 let BAD=$BAD-1
 curl -X POST -d '[{"name":"response","columns":["val1","val2"],"points":[['$COUNT','$BAD]]}]' "http://localhost:8086/db/server1/series?u=root&p=root"
 newCount=`wc -l $logFile | sed 's/\([0-9]*\).*/\1/'`
 diff=$(( newCount - lastCount ))
 rate=$(echo "$diff / $frequency" |bc -l)
 #echo $rate
 curl -X POST -d '[{"name":"hps","columns":["val"],"points":[['$rate']]}]' "http://localhost:8086/db/server1/series?u=root&p=root"
 lastCount=$newCount
 sleep $frequency
done
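Assuming the script above is saved as webstats.sh (the name is just for illustration), a typical invocation against the Apache access log would be:

# ./webstats.sh -f 60 -l /var/log/httpd/access_log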

Friday 29 May 2015

Configuring SSL for Apache on Debian or Ubuntu

As root, navigate to /path/to/certs.

Note: The default for Debian and Ubuntu is /etc/ssl/certs/

$ cd /path/to/certs

To generate a CSR (and a new private key) to be signed by your provider, use the following OpenSSL command:

$ openssl req -new -newkey rsa:2048 -nodes -keyout example.com.key -out example.com.csr

Below are explanations for the values that you should provide.

Country Code: For this question, supply the 2-digit ISO abbreviation for your country.  If you're in the United States, your 2-digit ISO abbreviation is US.
State or Province Name: This should be the full name of the state or province where your organization is located.  Do not abbreviate the name; you must use the full name.
Locality Name (city): This is the town or city where your organization is located.  If your location is based in Mountain View, CA, then your locality name would be Mountain View.
Organization Name: This should be the legal name of your organization.  If your organization is Example, LLC, then your CSR's organization should be Example, LLC.
Organization Unit: This value should reflect the section of your organization, such as accounting, marketing, billing, Information Technology, etc.
Common Name: This is the fully qualified domain name for your website; for example, if your website is https://www.example.com then your CSR's common name should be www.example.com.
Email Address: An email address that can be used to contact your organization.
 
Note: You will be prompted to supply 'extra' attributes.  In most cases it is advised to leave these fields blank; you can do so by just pressing enter at the prompt.
Once the files have been generated, print the contents of example.com.csr using the cat command.  This will show an encoded request preceded by -----BEGIN CERTIFICATE REQUEST-----
and followed by -----END CERTIFICATE REQUEST-----.  Copy the contents of the file, in its entirety, into your SSL provider's web UI.
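Before submitting, you can also decode the CSR with OpenSSL to make sure the fields look right; this is an optional check, not a required step:

$ openssl req -noout -text -in example.com.csr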

Once you submit the contents of your example.com.csr file, you will be able to download a package containing two files: Example.com.crt and provider_bundle.crt.

Note: If you’re using GoDaddy your “provider_bundle.crt” file may be called either “gd_bundle.crt” or “sf_bundle.crt.”
Download and unzip the signed certificate, and move the contents of the .zip file into /path/to/certs.

Once both files have been placed in the /path/to/certs directory, you then must next modify your Apache Virtual Host to reflect the signed certificate.

If you are adding SSL encryption to a pre-existing site, odds are you already have the first Virtual Host entry; however, for this tutorial we will focus on the second entry, for port 443.  Below is an example of how the virtual host file for your website should appear:

<VirtualHost *:80>
     ServerAdmin example@example.com
     ServerName www.example.com
     ServerAlias example.com
     DocumentRoot /path/to/example.com/public_html
     ErrorLog /path/to/example.com/logs/error.log
     CustomLog /path/to/example.com/access.log combined
</VirtualHost>


<VirtualHost *:443>
     SSLEngine On
     SSLCertificateFile /path/to/certs/Example.com.crt
     SSLCertificateKeyFile /path/to/certs/example.com.key
     SSLCACertificateFile /path/to/certs/sf_bundle.crt

     ServerAdmin example@example.com
     ServerName www.example.com
     DocumentRoot /srv/www/example.com/public_html/
     ErrorLog /path/to/example.com/logs/error.log
     CustomLog /path/to/example.com/logs/access.log combined
</VirtualHost>
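Before reloading Apache, it is also worth confirming that the certificate and key actually belong together; the two commands below should print identical hashes (an optional sanity check):

$ openssl x509 -noout -modulus -in /path/to/certs/Example.com.crt | openssl md5
$ openssl rsa -noout -modulus -in /path/to/certs/example.com.key | openssl md5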

Enable SSL Module

$ a2enmod ssl

Reload Apache to Update the Changes

$ /etc/init.d/apache2 reload

Thursday 21 May 2015

Debian Package building... more efficiently!

First make sure you have the deb-src repositories in sources.list:

  $ grep deb-src /etc/apt/sources.list
  deb-src http://security.ubuntu.com/ubuntu precise-security main restricted
  deb-src http://security.ubuntu.com/ubuntu precise-security universe
  deb-src http://security.ubuntu.com/ubuntu precise-security multiverse
  deb-src http://archive.canonical.com/ubuntu precise partner
  deb-src http://extras.ubuntu.com/ubuntu precise main
  deb-src http://br.archive.ubuntu.com/ubuntu/ubuntu/ precise main restricted
  deb-src http://br.archive.ubuntu.com/ubuntu/ubuntu/ precise-updates main restricted
  deb-src http://br.archive.ubuntu.com/ubuntu/ubuntu/ precise universe
  deb-src http://br.archive.ubuntu.com/ubuntu/ubuntu/ precise-updates universe
  deb-src http://br.archive.ubuntu.com/ubuntu/ubuntu/ precise multiverse
  deb-src http://br.archive.ubuntu.com/ubuntu/ubuntu/ precise-updates multiverse
  deb-src http://br.archive.ubuntu.com/ubuntu/ precise-security main restricted
  deb-src http://br.archive.ubuntu.com/ubuntu/ precise-security universe
  deb-src http://br.archive.ubuntu.com/ubuntu/ precise-security multiverse

If not, add these lines to /etc/apt/sources.list and run

  # apt-get update

After that you can get the wireshark sources. The files will be downloaded
to the current directory.

  $ apt-get source wireshark

This will download the wireshark source, unpack the tarball and copy the
"debian" diretory into it:

  $ cd wireshark-1.6.7

All further steps assume you are in this directory.

In this directory, you can find the "debian" subdirectory, which contains
the build and installation instructions for a debian package. It also
contains a "debian/patches" directory which contains the patches that
debian and/or ubuntu apply to the original sources.

  $ ls debian/patches
  00list                                     06_release-version.patch
  01_idl2deb.patch                           07_use-theme-icon.patch
  02_asn2deb.patch                           08_wireshark-desktop-menu.patch
  03_preferences.dpatch                      09_idl2wrs.patch
  03_preferences.patch                       16_licence_about_location.patch
  04_asn2wrs_ply.patch                       series
  05_note-README-when-running-as-root.patch

We will create a patch that fits in this hierarchy using the tool called
"quilt":

  # apt-get install quilt

The quilt patch system manages a stack of patches to the original source.
Since we want to add a new patch, we must first apply all patches contained
in the package.

  $ export QUILT_PATCHES=debian/patches
  $ quilt push -a
  File series fully applied, ends at patch 16_licence_about_location.patch

Now we can create our patch. We'll prefix it with "17_" since the last
patch in debian/patches is prefixed with "16_".

  $ quilt new 17_readme.patch
  Patch 17_readme.patch is now on top
  $ quilt add README          # do this for every file you edit
  File README added to patch 17_readme.patch
  $ echo 'This wireshark has XIA support' >> README
  $ quilt refresh
  Refreshed patch 17_readme.patch
  $ quilt pop -a

That last command will undo all the applied patches, including yours, and
leave the sources clean. You can see that your patch was added to the patch
system:

  $ ls debian/patches
  00list                                     06_release-version.patch
  01_idl2deb.patch                           07_use-theme-icon.patch
  02_asn2deb.patch                           08_wireshark-desktop-menu.patch
  03_preferences.dpatch                      09_idl2wrs.patch
  03_preferences.patch                       16_licence_about_location.patch
  04_asn2wrs_ply.patch                       17_readme.patch
  05_note-README-when-running-as-root.patch  series

  $ cat debian/patches/series
  01_idl2deb.patch
  02_asn2deb.patch
  03_preferences.patch
  04_asn2wrs_ply.patch
  05_note-README-when-running-as-root.patch
  06_release-version.patch
  07_use-theme-icon.patch
  08_wireshark-desktop-menu.patch
  09_idl2wrs.patch
  16_licence_about_location.patch
  17_readme.patch

You can now build the package, but first you need to install its build dependencies.

  # apt-get build-dep wireshark

If you decide to rename the package (say, to "wireshark-xia"), you must do so
in two places: first, add a new entry to the debian/changelog file (be sure
to respect the indentation; it must be exactly the same as in the other
entries). In your entry, instead of using "wireshark" as the package name
in the first line of the changelog entry, change it to "wireshark-xia". The
changelog can be edited by using the dch utility. To add a new entry to the
changelog, issue:

  $ dch -i

Here you can rename the package, change the version, add in your name/email
address, and provide a synopsis of changes made.

Second, edit the debian/control file and change the "Source:" header to
"wireshark-xia" and change the "Package:" headers to add the new name. For
example, change "Package: wireshark-common" to "Package: wireshark-xia-common".
Note that some libraries are also built from this package (eg. libwireshark1,
libwiretap1). If you rename those too, be sure to replace the old name globally
in the control file, because some of the packages depend on those libraries,
and the name listed in the "Depends:" header must match with the one declared
in the "Package:" header. If you do change the package naming scheme, you will
have to change the name of the original tarball in order to execute the debuild
step.
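As a rough sketch of what those renames look like (illustrative only, not the complete control file), the edited headers in debian/control would read something like:

  Source: wireshark-xia
  ...
  Package: wireshark-xia-common
  ...
  Package: wireshark-xia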

Finally, you can now generate the debian package:

  # apt-get install devscripts
  $ debuild -uc -us

When this command finishes, you'll find the *.deb files in the parent
directory. They can be installed normally with "dpkg -i <package>.deb".

Friday 9 January 2015

Asterisk 11.14 Patching Steps

Get Dependencies :

# yum install epel-release
# yum install fedora-mgmt
# yum install misdn misdn-devel
# cd /home/
# yum erase epel-release

Prepare the build environment :

# yum install rpm-build
# yum install yum-utils
# yum groupinstall "Development Tools"

I will use the asterisk SRPM :

# cd /home/
# mkdir -p ~/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
# echo '%_topdir %(echo $HOME)/rpmbuild' > ~/.rpmmacros
# rpm --nomd5 -ivh asterisk-11.14.1-1.src.rpm
# rpm --nomd5 -ivh mISDNuser-2.0.17-9.1.src.rpm
# cd /root/rpmbuild/SPECS

# yum-builddep asterisk11.spec
# yum-builddep misdnuser.spec
# rpmbuild -bp misdnuser.spec
# rpmbuild -ba misdnuser.spec
# cd /root/rpmbuild/RPMS/x86_64
# rpm -Uvh misdnuser*.rpm
# rpmbuild -bp asterisk11.spec

NOTE: This could stop, as you will be asked for dependencies; install them all and then run the above command again.

This will unpack and patch the source. This is where our own patch comes in; move to the BUILD folder.

# cd /root/rpmbuild/BUILD

You will see a source directory like this one:

# ls
asterisk-11.14.1

Now make a backup of this directory, as we will need it to generate our own patch.

# cp -r asterisk-11.14.1/ asterisk-11.14.1-orig

This will result in two directories in the BUILD folder, like this:

# ls
asterisk-11.14.1  asterisk-11.14.1-orig

Now move into your source directory:

# cd asterisk-11.14.1

Once inside, make whatever modifications to the source you require, then move back up to the BUILD folder:

# cd /root/rpmbuild/BUILD

Now we make our own patch :

# diff -Naur asterisk-11.14.1-orig/ asterisk-11.14.1/ > my.patch

This will generate our patch file, which will look like the diff below; afterwards move it to the SOURCES folder.

diff -Naur asterisk-11.14.1-orig/channels/chan_sip.c asterisk-11.14.1/channels/chan_sip.c
--- asterisk-11.14.1-orig/channels/chan_sip.c 2014-11-21 17:00:38.000000000 +0500
+++ asterisk-11.14.1/channels/chan_sip.c 2014-11-21 17:04:29.000000000 +0500
@@ -7823,7 +7823,7 @@
     We also check for vrtp. If it's not there, we are not allowed do any video anyway.
   */
  if (i->vrtp) {
- if (ast_test_flag(&i->flags[1], SIP_PAGE2_VIDEOSUPPORT))
+ if (ast_test_flag(&i->flags[1], SIP_PAGE2_VIDEOSUPPORT_ALWAYS))
  needvideo = 1;
  else if (!ast_format_cap_is_empty(i->prefcaps))
  needvideo = ast_format_cap_has_type(i->prefcaps, AST_FORMAT_TYPE_VIDEO); /* Outbound call */
@@ -7870,6 +7870,11 @@
  ast_channel_set_fd(tmp, 2, ast_rtp_instance_fd(i->vrtp, 0));
  ast_channel_set_fd(tmp, 3, ast_rtp_instance_fd(i->vrtp, 1));
  }
+ else if (i->vrtp) {
+ // Properly disable video if not needed
+ ast_rtp_instance_destroy(i->vrtp);
+ i->vrtp = NULL;
+ }
  if (needtext && i->trtp) {
  ast_channel_set_fd(tmp, 4, ast_rtp_instance_fd(i->trtp, 0));
  }

# mv my.patch /root/rpmbuild/SOURCES

Now we edit the spec file again to tell it about our patch:

# cd /root/rpmbuild/SPECS
# nano asterisk11.spec

You will find two points at which editing is required; for example, in this case the first one will look like this:

....../.../.../.....
Patch09: asterisk-11.3.0-xorcom-busydetect-05-cap-limit-threshold.patch
Patch10: asterisk-11.3.0-xorcom-busydetect-06-dahdi-config-options-busydetect.patch
Patch11: asterisk-11.5.1-chan_allogsm-2.0.7-v2.patch
Patch12: asterisk-11.11.0-srtp-lifetime.patch
Patch13: my.patch

Notice how I added the Patch13 line; this is what we add to the spec file. One more entry is required to tell rpmbuild how to apply the patch, which we add like this:

....../.../.../.....
%patch09 -p1
%patch10 -p1
%patch11 -p1
%patch12 -p1
%patch13 -p1

Again, the %patch13 line is the entry I added to the spec file; now save the spec file and quit. Now comes the best part:

# rpmbuild -bp asterisk11.spec

This will apply your patch, and your modified source is now available. Now we build the source so that we can install it; this is done very easily:

# rpmbuild -ba asterisk11.spec

This will generate your RPMs, which will be located in the RPMS folder under your architecture; in my case they were in:

# cd /root/rpmbuild/RPMS/x86_64

Now to install it we just do this :

# rpm -Uvh asterisk*.rpm

If you get conflict errors, try this:

# rpm -Uvh --force asterisk*.rpm
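To confirm the packages were installed, a quick query is enough (optional check):

# rpm -qa | grep -i asterisk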

This marks the end of our tutorial.