[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Fwd: Re: [ns] why delay is so large?




----------  Forwarded Message  ----------
Subject: Fwd: Re: [ns] why delay is so large?
Date: Tue, 6 Jun 2000 16:00:24 -0800
From: Anaking <[email protected]>


Hi, dear users,

I forward this message to you, and attach my scripts.
I use the WF2Q algorithm. You can try WFQ too; the results are similar.
The delay seen by each flow is rather large.

I hope you can help me at this.

Thanks.

----------  Forwarded Message  ----------
Subject: Re: [ns] why delay is so large?
Date: Tue, 6 Jun 2000 14:16:09 -0800
From: Anaking <[email protected]>


Hi, Tan,

Still on the previous topic: now the data rate is 64 kbps and the link rate is
640 kbps. According to the WFQ delay bound derivation, Di = Lp*(1 + Sum(wi)/wi)/C, that
means the delay should be bounded by 53*8*(1+10)/640k, approximately 7 ms. However, the
delay in the simulation is even over 60 ms. Is that normal? I can't figure it out yet.

I have tried both the wf2q and wfq algorithms released in ns-contribution. The
results are similar.

Please help me.


# CBR			  null0	
#      \fid_=0 		/
# CBR --n0====WFQ=====n1--null1
#      /fid_=2          \
# CBR   		 null2
# To observe whether WFQ can maintain the delay bound. If CAC does not
# consider the delay requirement, more connections will be admitted,
# so display the delay-bound violation probability.

# Helper procedures (build-udp/build-tcp, Simulator get-link/get-queue)
source wf2qutils.tcl
set ns [new Simulator]

# NOTE(review): ns-random 0 seeds ns's RNG; in ns-2 a 0 argument
# selects a heuristic (non-reproducible) seed -- confirm if exact
# repeatability is wanted.
ns-random 0

# Two-node topology: all flows share the single n0 -> n1 link.
set n0 [$ns node]
set n1 [$ns node]

# Per-flow throughput trace files (closed and plotted by Bfinish).
set f0 [open out0.tr w]
set f1 [open out1.tr w]
set f2 [open out2.tr w]

# Packet-level trace of every event; parsed later by Pfinish's awk.
set tf [open out.tr w]
$ns trace-all $tf
# Per-flow mean queueing delay records, written by cdumpdel.
set df [open temp.q w]

# Bottleneck link: 640 kb/s, 10 ms delay, WF2Q scheduling discipline.
$ns simplex-link $n0 $n1 640Kb 10ms WF2Q

# set sizes of queues allocated to  flows 0 and 1 at the output
# port of n0 to 5000 bytes
# 
# NOTE: first we get a handle to the RR queue discipline
set q [$ns get-queue $n0 $n1]

# Give each of the 10 flows a 2650-byte queue (50 x 53-byte packets)
# and an equal weight of 1.
for {set i 0} {$i < 10} {incr i} {
	global q
	$q set-queue-size $i 2650
	# Set the weight of the flows
	$q set-flow-weight $i 1
	}
#$q set-queue-size 0 2650
#$q set-flow-weight 0 0.90

# Flow monitor classifying by flow id, attached to the bottleneck;
# cdumpdel reads its per-flow delay samples.
set fmon [$ns makeflowmon Fid]
$ns attach-fmon [$ns link $n0 $n1] $fmon

# Ten CBR/UDP flows: 53-byte packets every 6.625 ms = 64 kb/s each,
# active from t=1.0 s to t=10.0 s.
for {set i 0} {$i < 10} {incr i} {
	build-udp $n0 $n1 53 0.006625 $i 1.0 10.0
}

# Schedule: one delay dump at t=8 s (earlier dumps commented out),
# then the xgraph plot and exit at t=10 s.
# $ns at 10.0 "Bfinish"
# $ns at 2.0 "cdumpdel"
# $ns at 4.0 "cdumpdel"
 
# $ns at 6.0 "cdumpdel"
 $ns at 8.0 "cdumpdel"

#$ns at 10.0 "Bfinish; Pfinish Output"
$ns at 10.0 "Pfinish Output"
$ns at 10.0 "exit 0"


# To display the throupput of several flows
# Close the three per-flow throughput trace files and hand them to
# xgraph (in the background) for display.
proc Bfinish {} {
	global ns f0 f1	f2
	# Release the trace channels so the files are fully flushed
	# before xgraph reads them.
	foreach ch [list $f0 $f1 $f2] {
		close $ch
	}
	# Plot the three traces in one 800x400 window.
	exec xgraph out0.tr out1.tr out2.tr -geometry 800x400 &
}

# To display the packets dispatched of flows
# Build an xgraph input file (temp.rands) from the packet trace
# out.tr and plot it: one data set for enqueue/dequeue events of cbr
# packets, plus two dummy sets, then the drops, so drops get the
# fourth data set's mark style.  "file" becomes the plot title.
# NOTE(review): the awk y-value "$8 + ($11 % 90) * 0.01" presumably
# combines the trace's flow-id and sequence-number columns to offset
# packets of different flows vertically -- verify against the ns
# trace format in use.
proc Pfinish file {
	
	set f [open temp.rands w]
	puts $f "TitleText: $file"
	puts $f "Device: Postscript"
	
	# start from clean (but existing) temp files for the awk output
	exec rm -f temp.p temp.d 
	exec touch temp.d temp.p
	#
	# split queue/drop events into two separate files.
	# we don't bother checking for the link we're interested in
	# since we know only such events are in our trace file
	#
	exec awk {
		{
			if (($1 == "+" || $1 == "-" ) && \
			    ($5 == "cbr"))\
					print $2, $8 + ($11 % 90) * 0.01
		}
	} out.tr > temp.p
	exec awk {
		{
			if ($1 == "d")
				print $2, $8 + ($11 % 90) * 0.01
		}
	} out.tr > temp.d

	# xgraph data set 1: enqueue/dequeue points
	puts $f \"packets
	flush $f
	exec cat temp.p >@ $f
	flush $f
	# insert dummy data sets so we get X's for marks in data-set 4
	puts $f [format "\n\"skip-1\n0 1\n\n\"skip-2\n0 1\n\n"]

	# xgraph data set 4: drop points
	puts $f \"drops
	flush $f
	#
	# Repeat the first line twice in the drops file because
	# often we have only one drop and xgraph won't print marks
	# for data sets with only one point.
	#
	exec head -1 temp.d >@ $f
	exec cat temp.d >@ $f
	close $f
	exec xgraph -bb -tk -nl -m -x time -y packet temp.rands &

	# dump the highest seqno sent of each tcp agent
	# this gives an idea of throughput
	set k 1
	while 1 {
		global tcp$k
		if [info exists tcp$k] {
			set tcp [set tcp$k]
			puts "tcp$k seqno [$tcp set t_seqno_]"
		} else {
			break
		}
		incr k
	}
	# terminate the simulation once the plot has been launched
	exit 0
}

# To display average queueing delay 
# Dump the mean queueing delay observed so far for flows 0-2 to the
# delay trace file $df, one line per flow: "<now> <fid> <mean>".
# Reads the global flow monitor ($fmon) attached to the bottleneck.
# Safe to schedule repeatedly: the file is flushed here, not closed
# (the original closed $df, so any second invocation -- e.g. the
# commented-out dumps at t=2/4/6 -- would fail on a dead channel;
# the channel is closed automatically at "exit 0").
proc cdumpdel {} {
	global ns fmon df
	set now [$ns now]
	set fcl [$fmon classifier]

	set fids { 0 1 2 }
	foreach i $fids {
		# per-flow state kept by the flow monitor's classifier
		set flow [$fcl lookup auto 0 0 $i]
		if { $flow != "" } {
			set dsamp [$flow get-delay-samples]
			puts "dumping delay"
			puts $df "$now $i [$dsamp mean]"
		} else {
			# monitor has not seen this flow yet
			puts $df "$now $i no-flow-state"
		}
	}
	flush $df
}

# Start the event scheduler; runs until the scheduled "exit 0" fires.
$ns run

#
# Carnegie Mellon University
# 1999-2000
#
# Shahzad Ali ([email protected])
#
# File implementing various procedures (e.g., creating TCP and UDP flows)

# Create a TCP flow 
# - type - TCP type ("TCP" is by default TCP Tahoe) 
# - src - TCP's source node
# - dest - TCP's destination node
# - pktSize - packet size
# - window  - maximum window size; if window is passed as 0, then
#             the default window size is set to 20 
# - flowid    - id associated with the TCP flow
# - startTime - time when the TCP source starts transmitting
# - stopTime  - time when the TCP source stops transmitting

# Build a TCP connection from src to dest driven by an FTP source.
# type:      "TCP" (Tahoe), "Reno", "Sack" or "Newreno"; anything
#            else raises an error.
# pktSize:   packet size in bytes; <= 0 keeps the agent default.
# window:    maximum window; 0 selects 20 (the ns-2 v2.0 default).
# flowid:    value assigned to the agent's class_.
# startTime/stopTime: FTP source start/stop times (seconds).
# Returns the TCP agent handle.
proc build-tcp { type src dest pktSize window flowid startTime stopTime } {
    global ns

    # build tcp source and the matching sink
    if { $type == "TCP" } {
      set tcp [new Agent/TCP]
      set snk [new Agent/TCPSink]
    } elseif { $type == "Reno" } {
      set tcp [new Agent/TCP/Reno]
      set snk [new Agent/TCPSink]
    } elseif { $type == "Sack" } {
      set tcp [new Agent/TCP/Sack1]
      set snk [new Agent/TCPSink/Sack1]
    } elseif  { $type == "Newreno" } {
      set tcp [new Agent/TCP/Newreno]
      set snk [new Agent/TCPSink]
    } else {
      # Fail fast: the original only printed a (misspelled) message
      # and fell through, then crashed on the undefined $tcp below.
      error "ERROR: Invalid tcp type \"$type\""
    }
    $ns attach-agent $src $tcp

    #build tcp sink
    $ns attach-agent $dest $snk

    # connect source to sink
    $ns connect $tcp $snk

    # init. tcp parameters; pktSize <= 0 keeps the default
    if { $pktSize > 0 } {
      $tcp set packetSize_ $pktSize
    }
    $tcp set class_ $flowid
    if { $window > 0 } {
      $tcp set window_ $window
    } else {
      # default in ns-2 version 2.0
      $tcp set window_ 20
    }

    # drive the connection with an FTP application
    set ftp [new Source/FTP]
    $ftp set agent_ $tcp
    $ns at $startTime "$ftp start"
    $ns at $stopTime "$ftp stop"

    return $tcp
}

# Create a UDP flow 
# - src - UDP's source node
# - dest - UDP's destination node
# - pktSize  - packet size
# - interval - the average interval between two packets (seconds)
# - id        - flow id associated with the UDP flow
# - startTime - time when the UDP source starts transmitting
# - stopTime  - time when the UDP source stops transmitting

# Build a CBR-over-UDP flow from src to dest: a UDP agent fed by a
# CBR traffic generator, received by a LossMonitor sink.
# pktSize <= 0 keeps the generator's default packet size; interval
# is the mean inter-packet gap (randomized, random_ = 1); id is the
# flow id; start/stopTime schedule the generator.  Returns the UDP
# agent handle.
proc build-udp { src dest pktSize interval id startTime stopTime} {
    global ns

    # source side: UDP agent driven by a CBR generator
    set agent [new Agent/UDP]
    $ns attach-agent $src $agent
    set gen [new Application/Traffic/CBR]
    $gen attach-agent $agent

    # sink side: LossMonitor counts arrivals and losses
    set sink [new Agent/LossMonitor]
    $ns attach-agent $dest $sink

    # wire source to sink
    $ns connect $agent $sink

    # traffic parameters
    if {$pktSize > 0} {  
        $gen set packet_size_ $pktSize
    }
    $agent set fid_     $id
    $gen set interval_  $interval
    $gen set random_    1

    # schedule the generator's on/off times
    $ns at $startTime "$gen start"
    $ns at $stopTime "$gen stop"

    return $agent
}

#
# Return the reference to the link between node1 and node2
#
# Look up the link object connecting node1 -> node2 in this
# simulator's link_ array, which is keyed by "srcid:dstid".
Simulator instproc get-link { node1 node2 } {
    $self instvar link_
    set key "[$node1 id]:[$node2 id]"
    return $link_($key)
}

#
# Return the reference to the output queue at node1 
# for the link between node1 and node2
# 
# NOTE: In most cases the output queue coincides to the 
# scheduling discipline associated to that output
#
Simulator instproc get-queue { node1 node2 } {
    # Return the output queue at node1 for the node1 -> node2 link.
    # Dispatch through $self (consistent with get-link above) rather
    # than the global "ns" handle, so the method works for any
    # Simulator instance, not just the one stored in the global.
    set l [$self get-link $node1 $node2]
    return [$l queue]
}