Constant Throughput Timer performance

2005-12-09 Thread Iago Toral Quiroga
Hi,

I've configured a test with 100 thread groups (one thread per thread
group) and added a constant throughput timer to get a 10 requests per
second performance. To do so, I configured target throughput to 600
(samples per minute) and selected to compute performance based on all
active threads.

The result is as expected, I get an average throughput of 10 requests
per second, but they are not uniform along the time. What I get is
something like this:

At second 0, jmeter launches 100 requests to the server. At second 4,
jmeter has received all the responses, but because it has launched 100
requests at second 0, it must wait till second 10 to start another bunch
of 100 requests. What I expect from this kind of tests is getting 10
requests per second *each second*.

This kind of behaviour is much more like a repeated peak test than a
constant throughput test. I know I can get a more uniform test by dropping
the thread count so jmeter would have to wait less time to launch the
next bunch of requests, but that is weird and still a trick that does
not solve the root of the problem at all. Am I missing something? Is there
a way to get more uniform behaviour for this kind of test?

Thanks in advance for your help!
-- 
Abel Iago Toral Quiroga 
Igalia http://www.igalia.com

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



I can't get any solution please see what's the prob.

2005-12-09 Thread Tapaswini Das
Hi All,

 

I'm using JMeter 2.0.3 for Load testing.

 

 My test plan contains

 

1.   Login in the user site.

2.   Click on the respective PPT.

3.   View.

4.   Preview.

5.   Add Slide to Slide bin of the default tray.

6.   Download the Slides.

7.   Clear the Tray

 

The Download action contains extra time. The application Downloads file in
Presentation Engine Makedoc folder. But in JMeter its showing error. Please
let me know where the problem is. 

 




function add_days(adate,days) { return new Date(adate.getTime() + (days *
8640)); } function onLoad() { checkNavbarUpdate();
setOptionMenu('default'); var frm = document.frmEmail; frm.reset(); if
(frm.rEmailType!=null) { frm.expireEnabled.value = frm.rEmailType(0).checked
? true : false; } } function onUnload() {} function
popupDownloadTerms(target) { url =
'/newubs/servlet/com.heartbeat.slideengine.user.controller.Controller?action
=downloadTerms&jsp=downloadTerms&target=' + escape(target);
window.open(url,'DownloadTerms','width=550,height=545'); } // quick browser
tests var ns4 = (document.layers) ? true : false; var ie4 = (document.all &&
!document.getElementById) ? true : false; var ie5 = (document.all &&
document.getElementById) ? true : false; var ns6 = (!document.all && 



document.getElementById) ? true : false; function showObj(sw,obj) { //
show/hide the divisions if (sw && (ie4 || ie5) )
document.all[obj].style.visibility = 'visible'; if (!sw && (ie4 || ie5) )
document.all[obj].style.visibility = 'hidden'; if (sw && ns4)
document.layers[obj].visibility = 'visible'; if (!sw && ns4)
document.layers[obj].visibility = 'hidden'; } function enabledObject(obj,
enabled) { obj.disabled= enabled; } function enabledExpiryDate(frm, enabled)
{ frm.expireEnabled.value = frm.rEmailType(0).checked ? true : false; }
function emailCheck (emailStr, showResult) { var checkTLD=1; var
knownDomsPat=/^(com|net|org|edu|int|mil|gov|arpa|biz|aero|name|coop|info|pro
|museum)$/; var emailPat=/^(.+)@(.+)$/; var
specialChars="\\(\\)>@,;:\\\"\\.\\[\\]"; var validChars="\[^\\s" +
specialChars + "\]"; var quotedUser="(\"[^\"]*\")"; var
ipDomainPat=/^\[(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})\]$/; var
atom=validChars + '+'; var word="(" + atom + "|" + quotedUser + ")"; var
userPat=new RegExp("^" + word + "(\\." + word + ")*$"); var domainPat=new
RegExp("^" + atom + "(\\." + atom +")*$"); var
matchArray=emailStr.match(emailPat); if (emailStr==null||emailStr=="") { if
(showResult) alert("A confirmation email must be specified"); return false;
} if (matchArray==null) { if (showResult) alert("Email address seems
incorrect (check @ and .'s)"); return false; } var user=matchArray[1]; var
domain=matchArray[2]; for (i=0; i 127) { if (showResult) alert("Ths username
contains invalid characters."); return false; } } for (i=0; i 127) { if
(showResult) alert("Ths domain name contains invalid characters."); return
false; } } if (user.match(userPat)==null) { if (showResult) alert("The
username doesn't seem to be valid."); return false; } var
IPArray=domain.match(ipDomainPat); if (IPArray!=null) { for (var
i=1;i=4;i++) { if (IPArray[i]>255) { if (showResult) alert("Destination IP
address is invalid!"); return false; } } return true; } var atomPat=new
RegExp("^" + atom + "$"); var domArr=domain.split("."); var
len=domArr.length; for (i=0;i { if (showResult) alert("The domain name does
not seem to be valid."); return false; } } if (checkTLD &&
domArr[domArr.length-1].length!=2 &&
domArr[domArr.length-1].toLowerCase().search(knownDomsPat)==-1) { if
(showResult) alert("The address must end in a well-known domain or two
letter " + "country."); return false; } if (len2) { if (showResult)
alert("This address is missing a hostname!"); return false; } return true; }
function noDuplicateEmail(emailList) { var index = 0; var
emailArr=emailList.split(","); var noDupEmail = emailArr[0].trim() + ',';
for (var i=1; i { noDupEmail += emailArr[i].trim() + ',' ; } } return
noDupEmail.substring(0, noDupEmail.length-1); } function
checkValueOfEmailForm(showResult) { var frm = document.frmEmail; // Fix
PTR-1703 Email Tray page: // Incorrect notification when clicks at 'Email
Now' while the 'Send to' field is blank. if(showResult &&
failEmpty(frm.tEmail, "Recipient email address")) { return false; } // Basic
email checking var emailArr = frm.tEmail.value.split(","); var
confirmedEmailArr = frm.tConfirmedEmail.value.split(","); for (var i=0; i /
Check duplicate email in email textfield frm.tEmail.value =
noDuplicateEmail(frm.tEmail.value); frm.tConfirmedEmail.value =
noDuplicateEmail(frm.tConfirmedEmail.value); // Check that the user must at
least select 1 type of file /*var passExistsType = false; if
(frm.cFileTypePPT!=null) { if (frm.cFileTypePPT.checked) { passExistsType =
true; } } if (frm.cFileTypePDF!=null) { if (frm.cFileTypePDF.checked) {
passExistsType = true; } } if (frm.cFileTypeAssets!=null) { if
(frm.cFileTypeAssets.checked) { passExistsType = true; } } if

Peak tests

2005-12-09 Thread Iago Toral Quiroga
hi!,

I'm using Jmeter to perform a peak test of my web server (100 http
requests at the same time). To do such, I've created 100 thread groups,
each one with one thread that sends a different http request. At the web
server I log the time (in milliseconds) at which each request is
received.

I need these requests to be sent to the web server as close together as
possible, but I noticed they are logged at the web server over a period of
time that varies but is never less than 0.8 secs.

Shouldn't jmeter be able to send 100 requests in a shorter period of
time? Is there any way to speed up the launching of these requests?

I've also noticed that, if I enable the option to parse HTML in each
HTTP request (HTTPSampler.image_parser in jmx file), my web server log
tells me that jmeter needs 2 or even more seconds to send all the 100
requests, which leads me to think that some threads start processing their
responses before all requests have been sent. Can I change this
behaviour? This is a big problem, because this way, Jmeter is limited in
its capacity to send the requests as soon as possible to stress the
server.

My test machine has the following features:
CPU: 2.4 GHz
RAM: 512 MB
OS:  Debian Linux. Kernel 2.6.12.

Thanks in advance for your help.
-- 
Abel Iago Toral Quiroga 
Igalia http://www.igalia.com

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



test

2005-12-09 Thread Umesh Pawar
 



Re: How to test sample succes to run or cancel next sample ?

2005-12-09 Thread Nicolas

Hi,
I'd like to try another way to solve my problem using beanshell sampler 
as I can manipulate IsSuccess variable with beanshell.
But as I downloaded the latest jar for beanshell and added it to the 
classpath in the jmeter GUI, I can't create a script. It tells me that 
the beanshell interpreter was not found. How do I set jmeter to use beanshell?


Thanks,
Nicolas.

Nicolas wrote:


Hi,

I built a test case to test load on an intranet website of my company 
using JMeter.
The goal is to test load of the HTML GUI of the site, not database 
load or anything else.


I first explain below what I already tested and the problems I 
encountered.
Finally the question is on the end of this e-mail, I hope you'll get 
there...


So I first test the user login to the site and then test the call of a 
page showing a list of products.


On this site, users have to be logged in to see the products listing 
page.
So I'd like to run the http sample that call the listing page only if 
the http sample that call the login page and tries to log in is 
successful.
A successful login is tested using a regular expression to look for a 
session id in the HTML resulting page.


I tried to add a Result Status Action Handler post-processor in my 
login http sample, but it stops the tread definitely on failure.
What I want is just a loop to be stopped and the thread have to 
continue in the next loop because login can fail due to heavy load or 
any other problem and well work on next loop. It's a shame to stop all 
the thread for that.


Another way I tried was to create an IF logic controller, but I don't 
know which variable value to test in this IF.
As I already had a Regular Expression Extractor to get a variable from 
the session ID returned by the login result page, I tried to use it 
for my test.
I added a "null" value in the default value field of the Regular 
Expression Extractor to have a value to test.

In my IF logic controller, I test if my variable is different to "null".

But this works because I have something to retrieve as I need the 
session ID in other samples.


My problem is that I also want to run other samples on a basis of 
preceding sample success, and this samples don't necessary need to 
retrieve any informations as the login sample do. I don't want to 
create extra test such as Regular Expression Extractor if I don't need 
to retrieve any informations. It would duplicate the assertions I 
already added to the samples and complicate the test case.


So I'd like to test if the preceding sample completed successfully 
before running any new sample without using Regular Expression Extractor.


One of my ideas is to use a JMeter variable, if such a variable exist.
So is there any JMeter variable, or anything else that would look like 
${isPrecedingSampleSuccessfull} for example to test if preceding 
sample in the same thread succeed ?


Did I miss something important in the JMeter usage ?

Thanks,
Nicolas.

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



testing a WebSphere Portal

2005-12-09 Thread Sorin N. Ciolofan
Hello!

Sorry that I disturb you again. Can somebody answer my question:
Can I test a WebSphere portal with jmeter?
The portal url's are in the following format:
http://172.16.1.124:9081/wps/myportal/!ut/p/kcxml/04_Sj9SPykssy0xPLMnMz0vM0Y_QjzKLN4i3dAXJgFku-pGoIsam6CKOcAFfj_zcVP2gonx9b_0A_YLc0NCIckdFAJmrntE!/delta/base64xml/L3dJdyEvd0ZNQUFzQUMvNElVRS82XzBfSkk!

Thanks


-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



Re: Peak tests

2005-12-09 Thread Peter Lin
I'm not sure I understand why you have 100 thread groups.

you can put the requests in sequence in 1 threadGroup and increase the
thread count to 100 with 0 second ramp up.

peter


On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
>
> hi!,
>
> I'm using Jmeter to perform a peak test of my web server (100 http
> requests at the same time). To do such, I've created 100 thread groups,
> each one with one thread that sends a different http request. At the web
> server I log the time (in milliseconds) at which each request is
> received.
>
> I need these requests to be sent to the web server as close as posible
> but I noticed they are are logged at the web server in a period of time
> that varies but is never lesser than 0.8 secs.
>
> ¿Shouldn't jmeter be able to send 100 requests in a leesser period of
> time? ¿Is there any way to boost the launching of these requests?
>
> I've also noticed that, if I enable the option to parse HTML in each
> HTTP request (HTTPSampler.image_parser in jmx file), my web server log
> tells me that jmeter needs 2 or even more seconds to send all the 100
> requests, which leads me to think that some threads start processing its
> response before all requests have been sent ¿can I change this
> behaviour? This is a big problem, because this way, Jmeter is limited in
> its capacity to send the requests as soon as posible to stress the
> server.
>
> My test machine has the following features:
> CPU: 2.4 GHz
> RAM: 512 MB
> OS:  Debian Linux. Kernel 2.6.12.
>
> Thanks in advance for your help.
> --
> Abel Iago Toral Quiroga
> Igalia http://www.igalia.com
>
> -
> To unsubscribe, e-mail: [EMAIL PROTECTED]
> For additional commands, e-mail: [EMAIL PROTECTED]
>
>


Re: testing a WebSphere Portal

2005-12-09 Thread Peter Lin
people have used jmeter to test struts, so it should be feasible. I've never
done it myself, so I can't really give you any pointers.

peter


On 12/9/05, Sorin N. Ciolofan <[EMAIL PROTECTED]> wrote:
>
> Hello!
>
> Sorry that I disturb you again. Can somebody answer my question:
> Can I test a WebSphere portal with jmeter?
> The portal url's are in the following format:
>
> http://172.16.1.124:9081/wps/myportal/!ut/p/kcxml/04_Sj9SPykssy0xPLMnMz0vM0Y_QjzKLN4i3dAXJgFku-pGoIsam6CKOcAFfj_zcVP2gonx9b_0A_YLc0NCIckdFAJmrntE!/delta/base64xml/L3dJdyEvd0ZNQUFzQUMvNElVRS82XzBfSkk
> !
>
> Thanks
>
>
> -
> To unsubscribe, e-mail: [EMAIL PROTECTED]
> For additional commands, e-mail: [EMAIL PROTECTED]
>
>


Re: Peak tests

2005-12-09 Thread Iago Toral Quiroga
El vie, 09-12-2005 a las 15:17, Peter Lin escribió:
> I'm not sure I understand why you have 100 thread groups.
> 
> you can put the requests in sequence in 1 threadGroup and increase the
> thread count to 100 with 0 second ramp up.
> peter

Because the requests must be different. If I do what you say, 
all the 100 threads within the threadgroup will send the same 
request (the first one in the sequence).

I tried using an interleave controller to avoid such problem, but the
interleave controller just deals requests for each thread, so the result
is the same.

Anyway, I've also tried having one thread group and 100 threads within
it sending the same HTTP request, but I still have the performance
problem I commented in my previous email.

Iago.


> 
> On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> >
> > hi!,
> >
> > I'm using Jmeter to perform a peak test of my web server (100 http
> > requests at the same time). To do such, I've created 100 thread groups,
> > each one with one thread that sends a different http request. At the web
> > server I log the time (in milliseconds) at which each request is
> > received.
> >
> > I need these requests to be sent to the web server as close as posible
> > but I noticed they are are logged at the web server in a period of time
> > that varies but is never lesser than 0.8 secs.
> >
> > ¿Shouldn't jmeter be able to send 100 requests in a leesser period of
> > time? ¿Is there any way to boost the launching of these requests?
> >
> > I've also noticed that, if I enable the option to parse HTML in each
> > HTTP request (HTTPSampler.image_parser in jmx file), my web server log
> > tells me that jmeter needs 2 or even more seconds to send all the 100
> > requests, which leads me to think that some threads start processing its
> > response before all requests have been sent ¿can I change this
> > behaviour? This is a big problem, because this way, Jmeter is limited in
> > its capacity to send the requests as soon as posible to stress the
> > server.
> >
> > My test machine has the following features:
> > CPU: 2.4 GHz
> > RAM: 512 MB
> > OS:  Debian Linux. Kernel 2.6.12.
> >
> > Thanks in advance for your help.
> > --
> > Abel Iago Toral Quiroga
> > Igalia http://www.igalia.com
> >
> > -
> > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > For additional commands, e-mail: [EMAIL PROTECTED]
> >
> >

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



Re: Peak tests

2005-12-09 Thread Peter Lin
for what it's worth, it's nearly impossible to get all 100 requests within
500ms. The reason for this is making the initial connection to your
webserver will have a high initial cost.  How many iterations are you using?

if you look at all formal performance test specifications, they all have a
ramp up time. The actual measurement is taken for a period after the server
has reached a steady state. does that make sense?

what you need to do is set the iterations to something like 1000. start the
test and then start counting from like 10minutes after the test started to
get an accurate measurement.

peter


On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
>
> El vie, 09-12-2005 a las 15:17, Peter Lin escribió:
> > I'm not sure I understand why you have 100 thread groups.
> >
> > you can put the requests in sequence in 1 threadGroup and increase the
> > thread count to 100 with 0 second ramp up.
> > peter
>
> Because the requests must be different. If I do what you say,
> all the 100 threads within the threadgroup will send the same
> request (the first one in the sequence).
>
> I tried using an interleave controller to avoid such problem, but the
> interleave controller just deals requests for each thread, so the result
> is the same.
>
> Anyway, I've also tried having one thread group and 100 threads within
> it sending the same HTTP request, but I still have the performance
> problem I commented in my previous email.
>
> Iago.
>
>
> >
> > On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > >
> > > hi!,
> > >
> > > I'm using Jmeter to perform a peak test of my web server (100 http
> > > requests at the same time). To do such, I've created 100 thread
> groups,
> > > each one with one thread that sends a different http request. At the
> web
> > > server I log the time (in milliseconds) at which each request is
> > > received.
> > >
> > > I need these requests to be sent to the web server as close as posible
> > > but I noticed they are are logged at the web server in a period of
> time
> > > that varies but is never lesser than 0.8 secs.
> > >
> > > ¿Shouldn't jmeter be able to send 100 requests in a leesser period of
> > > time? ¿Is there any way to boost the launching of these requests?
> > >
> > > I've also noticed that, if I enable the option to parse HTML in each
> > > HTTP request (HTTPSampler.image_parser in jmx file), my web server log
> > > tells me that jmeter needs 2 or even more seconds to send all the 100
> > > requests, which leads me to think that some threads start processing
> its
> > > response before all requests have been sent ¿can I change this
> > > behaviour? This is a big problem, because this way, Jmeter is limited
> in
> > > its capacity to send the requests as soon as posible to stress the
> > > server.
> > >
> > > My test machine has the following features:
> > > CPU: 2.4 GHz
> > > RAM: 512 MB
> > > OS:  Debian Linux. Kernel 2.6.12.
> > >
> > > Thanks in advance for your help.
> > > --
> > > Abel Iago Toral Quiroga
> > > Igalia http://www.igalia.com
> > >
> > > -
> > > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > > For additional commands, e-mail: [EMAIL PROTECTED]
> > >
> > >
>
> -
> To unsubscribe, e-mail: [EMAIL PROTECTED]
> For additional commands, e-mail: [EMAIL PROTECTED]
>
>


Re: Peak tests

2005-12-09 Thread Iago Toral Quiroga
First of all, thanks a lot for your answer peter,
I comment it between lines:

El vie, 09-12-2005 a las 16:00, Peter Lin escribió:
> for what it's worth, it's nearly impossible to get all 100 requests within
> 500ms. The reason for this is making the initial connection to your
> webserver will have a high initial cost.  How many iterations are you using.

Just one per threadgroup because I want just 100 requests as close as
possible in time. Anyway, I understand what you say about the difficulty
of having all 100 request in 500ms. One thing I think has a lot to do
with this, besides the connection issue you talk about, is the fact that
some threads begin processing their responses before all the threads
send their requests, because this prevents other threads from entering the
CPU and sending their requests, but I guess this is not a Jmeter issue, but
a kernel or a JVM matter.

> if you look at all formal performance test specifications, they all have a
> ramp up time. The actual measurement is taken for a period after the server
> has reached a steady state. does that make sense?

> what you need to do is set the iterations to something like 1000. start the
> test and then start counting from like 10minutes after the test started to
> get an accurate measurement.
> 

I get it, but this is not the scenario I want to measure. Besides the
scenario you talk about, we also need to know the maximum number of
requests the web server can handle if they come "at the same time". So,
imagine the web server has no requests to serve, and suddenly, N
requests come about at the same time, what we want to know is: how big N
can be? or what happens when N is like 50, 100, 300,... ?

Notice that I need the server to be "idle" before all the requests come
about, because if it's busy serving responses it's not serving just
N requests, but N plus all the requests it was already serving.

Thanks again for your help.
Iago.

> On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> >
> > El vie, 09-12-2005 a las 15:17, Peter Lin escribió:
> > > I'm not sure I understand why you have 100 thread groups.
> > >
> > > you can put the requests in sequence in 1 threadGroup and increase the
> > > thread count to 100 with 0 second ramp up.
> > > peter
> >
> > Because the requests must be different. If I do what you say,
> > all the 100 threads within the threadgroup will send the same
> > request (the first one in the sequence).
> >
> > I tried using an interleave controller to avoid such problem, but the
> > interleave controller just deals requests for each thread, so the result
> > is the same.
> >
> > Anyway, I've also tried having one thread group and 100 threads within
> > it sending the same HTTP request, but I still have the performance
> > problem I commented in my previous email.
> >
> > Iago.
> >
> >
> > >
> > > On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > > >
> > > > hi!,
> > > >
> > > > I'm using Jmeter to perform a peak test of my web server (100 http
> > > > requests at the same time). To do such, I've created 100 thread
> > groups,
> > > > each one with one thread that sends a different http request. At the
> > web
> > > > server I log the time (in milliseconds) at which each request is
> > > > received.
> > > >
> > > > I need these requests to be sent to the web server as close as posible
> > > > but I noticed they are are logged at the web server in a period of
> > time
> > > > that varies but is never lesser than 0.8 secs.
> > > >
> > > > ¿Shouldn't jmeter be able to send 100 requests in a leesser period of
> > > > time? ¿Is there any way to boost the launching of these requests?
> > > >
> > > > I've also noticed that, if I enable the option to parse HTML in each
> > > > HTTP request (HTTPSampler.image_parser in jmx file), my web server log
> > > > tells me that jmeter needs 2 or even more seconds to send all the 100
> > > > requests, which leads me to think that some threads start processing
> > its
> > > > response before all requests have been sent ¿can I change this
> > > > behaviour? This is a big problem, because this way, Jmeter is limited
> > in
> > > > its capacity to send the requests as soon as posible to stress the
> > > > server.
> > > >
> > > > My test machine has the following features:
> > > > CPU: 2.4 GHz
> > > > RAM: 512 MB
> > > > OS:  Debian Linux. Kernel 2.6.12.
> > > >
> > > > Thanks in advance for your help.
> > > > --
> > > > Abel Iago Toral Quiroga
> > > > Igalia http://www.igalia.com
> > > >
> > > > -
> > > > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > > > For additional commands, e-mail: [EMAIL PROTECTED]
> > > >
> > > >
> >
> > -
> > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > For additional commands, e-mail: [EMAIL PROTECTED]
> >
> >
-- 
Abel Iago Toral Quiroga 
Igalia http://www.igalia.com

---

Re: Peak tests

2005-12-09 Thread Peter Lin
your explanation helps, but here's the thing. Say I want to simulate the /.
effect. If 5K people all hit /. at the same exact nanosecond, all the
connections will still be queued up by the server and the webserver will
process them one at a time. As soon as a server thread/precess starts to
process the request, it's going to slow down the processing for all
subsequent requests.

therefore, it's really hard to do unless the server has lots of CPU's like
24 and multiple ethernet cards. On PC hardware, it's going to be very hard,
if not impossible. On a mainframe, it will be easier to simulate a large
number of truly concurrent requests.  If you want to reduce the likelihood
of JMeter being an issue, then I would setup 4 clients to hit a single
server. Though I really doubt you'll see a significant difference. Having done
these types of tests a few hundred times, it's just hard to do.  beyond
that, the bandwidth will severely limit the number of concurrent requests
the server can handle.  only way to avoid the network bottleneck is to pay
big bucks and co-locate at a backbone provider like MCI, Level3, Quest,
Global Crossing, or ATT.

hope that helps.

peter


On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
>
> First of all, thanks a lot for your answer peter,
> I comment it between lines:
>
> El vie, 09-12-2005 a las 16:00, Peter Lin escribió:
> > for what it's worth, it's nearly impossible to get all 100 requests
> within
> > 500ms. The reason for this is making the initial connection to your
> > webserver will have a high initial cost.  How many iterations are you
> using.
>
> Just one per threadgroup because I want just 100 requests as close as
> posible in time. Anyway, I understand what you say about the dificulty
> of having all 100 request in 500ms. One thing I think has a lot to do
> with this, besides the connection issue you talk about, is the fact that
> some threads begin processing their responses before all the threads
> send their requests, because this impedes other threads to enter into
> CPU and send their requests, but I guess this is not a Jmeter issue, but
> a kernel or a JVM matter.
>
> > if you look at all formal performance test specifications, they all have
> a
> > ramp up time. The actual measurement is taken for a period after the
> server
> > has reached a steady state. does that make sense?
>
> > what you need to do is set the iterations to something like 1000. start
> the
> > test and then start counting from like 10minutes after the test started
> to
> > get an accurate measurement.
> >
>
> I get it, but this is not the scenario I want to measure. Besides the
> scenario you talk about, we also need to know the maximum number of
> requests the web server can handle if they come "at the same time". So,
> imagine the web server has no requests to serve, and suddenly, N
> requests come about at the same time, what we want to know is: how big N
> can be? or what happens when N is like 50, 100, 300,... ?
>
> Notice that I need the server to be "idle" before all the requests come
> about, because if it's steady serving responses it's  not servinig just
> N requests, but N plus all the requests it was already serving.
>
> Thanks again for hour help.
> Iago.
>
> > On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > >
> > > El vie, 09-12-2005 a las 15:17, Peter Lin escribió:
> > > > I'm not sure I understand why you have 100 thread groups.
> > > >
> > > > you can put the requests in sequence in 1 threadGroup and increase
> the
> > > > thread count to 100 with 0 second ramp up.
> > > > peter
> > >
> > > Because the requests must be different. If I do what you say,
> > > all the 100 threads within the threadgroup will send the same
> > > request (the first one in the sequence).
> > >
> > > I tried using an interleave controller to avoid such problem, but the
> > > interleave controller just deals requests for each thread, so the
> result
> > > is the same.
> > >
> > > Anyway, I've also tried having one thread group and 100 threads within
> > > it sending the same HTTP request, but I still have the performance
> > > problem I commented in my previous email.
> > >
> > > Iago.
> > >
> > >
> > > >
> > > > On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > > > >
> > > > > hi!,
> > > > >
> > > > > I'm using Jmeter to perform a peak test of my web server (100 http
> > > > > requests at the same time). To do such, I've created 100 thread
> > > groups,
> > > > > each one with one thread that sends a different http request. At
> the
> > > web
> > > > > server I log the time (in milliseconds) at which each request is
> > > > > received.
> > > > >
> > > > > I need these requests to be sent to the web server as close as
> posible
> > > > > but I noticed they are are logged at the web server in a period of
> > > time
> > > > > that varies but is never lesser than 0.8 secs.
> > > > >
> > > > > ¿Shouldn't jmeter be able to send 100 requests in a leesser period
> of
> > > > > tim

AW: Value replacing in streams

2005-12-09 Thread Brudermann Roger
Thanks for all your feedback. I am currently working on my own stream 
replacement facility. 

As far as I understand the JMeter implementation, all the preprocessor 
variables are stored in an instance of JMeterVariables which can be accessed by 
means of calling JMeterContextService.getContext().getVariables(). To do the 
actual replacement in my stream I would like to know the names of all the 
(counter) variables that have been configured. Unfortunately the class 
JMeterVariables does not offer an operation to extract these names. It looks 
like I am a bit stuck here.

Do you know a solution or am I missing something?

Roger

-Ursprüngliche Nachricht-
Von: sebb [mailto:[EMAIL PROTECTED]
Gesendet: Montag, 5. Dezember 2005 13:51
An: JMeter Users List
Betreff: Re: Value replacing in streams


You will have to implement your own "stream edit" facility.

S.
On 05/12/05, Brudermann Roger <[EMAIL PROTECTED]> wrote:
> Dear JMeter Community
>
> We have the need to do performance testing with with huge SOAP and JMS
> messages (e.g. 50MB). Instead of pasting these messages in a text box of an
> appropriate sampler, we had the idea to write an own sampler which reads the
> messages as data streams directly from the file system. So far so good.
>
> The problem is that we need to use pre processor variables (counters) to
> customize the messages. We noticed that counter value replacing does not
> take place and we think this is due to the fact that the messages are not
> stored in a sampler property. Does somebody have an idea how we could
> implement some "on the fly value replacing" while reading the message stream
> from the file system (e.g. line by line)?
>
> Any help is greatly appreciated! Thanks!
>
> Regards,
> Roger Brudermann
>
> -
> To unsubscribe, e-mail: [EMAIL PROTECTED]
> For additional commands, e-mail: [EMAIL PROTECTED]
>
>

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



Re: Value replacing in streams

2005-12-09 Thread Peter Lin
mike or sebb would know more about this area. my knowledge of the variable
stuff is pretty weak

peter


On 12/9/05, Brudermann Roger <[EMAIL PROTECTED]> wrote:
>
> Thanks for all your feedback. I am currently working on my own stream
> replacement facility.
>
> As far as I understand the JMeter implementation, all the preprocessor
> variables are stored in an instance of JMeterVariables which can be accessed
> by means of calling JMeterContextService.getContext().getVariables(). To
> do the actual replacement in my stream I would like to know the names of all
> the (counter) variables that have been configured. Unfortunately the class
> JMeterVariables does not offer an operation to extract these names. It looks
> like I am a bit stuck here.
>
> Do you know a solution or am I missing something?
>
> Roger
>
> -Ursprüngliche Nachricht-
> Von: sebb [mailto:[EMAIL PROTECTED]
> Gesendet: Montag, 5. Dezember 2005 13:51
> An: JMeter Users List
> Betreff: Re: Value replacing in streams
>
>
> You will have to implement your own "stream edit" facility.
>
> S.
> On 05/12/05, Brudermann Roger <[EMAIL PROTECTED]> wrote:
> > Dear JMeter Community
> >
> > We have the need to do performance testing with huge SOAP and JMS
> > messages (e.g. 50MB). Instead of pasting these messages in a text box of
> an
> > appropriate sampler, we had the idea to write an own sampler which reads
> the
> > messages as data streams directly from the file system. So far so good.
> >
> > The problem is that we need to use pre processor variables (counters) to
> > customize the messages. We noticed that counter value replacing does not
> > take place and we think this is due to the fact that the messages are
> not
> > stored in a sampler property. Does somebody have an idea how we could
> > implement some "on the fly value replacing" while reading the message
> stream
> > from the file system (e.g. line by line)?
> >
> > Any help is greatly appreciated! Thanks!
> >
> > Regards,
> > Roger Brudermann
> >
> > -
> > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > For additional commands, e-mail: [EMAIL PROTECTED]
> >
> >
>
> -
> To unsubscribe, e-mail: [EMAIL PROTECTED]
> For additional commands, e-mail: [EMAIL PROTECTED]
>
> -
> To unsubscribe, e-mail: [EMAIL PROTECTED]
> For additional commands, e-mail: [EMAIL PROTECTED]
>
>


When to use which http sampler?

2005-12-09 Thread Christensen, Alan
I originally wrote all my scripts using the "HTTP Request" sampler.  Now
there is a second one that is called "HTTP Request HTTPClient".  What is
the advantage of using one vs the other? 

So far the "HTTP Request" sampler has worked well for my tests except
that the line speed property cannot be used with this sampler.

The "HTTP Request HTTPClient" sampler does work with the line speed
property but the current version in the December 7th nightly build
doesn't seem to work with https (parameter issue?) and doesn't
decompress pages that were requested with compression enabled.  Hence
all assertions fail.  These issues would seem to make this new sampler
not worth using unless you must take advantage of the line speed
property.  Are there any advantages of this sampler that I don't know
about?  Why was it developed?




Re: When to use which http sampler?

2005-12-09 Thread Peter Lin
the advantage of the HTTPClient version is it supports keep alive and slow
connections correctly. the default sun implementation does not and hasn't
since the beginning.

peter


On 12/9/05, Christensen, Alan <[EMAIL PROTECTED]> wrote:
>
> I originally wrote all my scripts using the "HTTP Request" sampler.  Now
> there is a second one that is called "HTTP Request HTTPClient".  What is
> the advantage of using one vs the other?
>
> So far the "HTTP Request" sampler has worked well for my tests except
> that the line speed property cannot be used with this sampler.
>
> The "HTTP Request HTTPClient" sampler does work with the line speed
> property but the current version in the December 7th nightly build
> doesn't seem to work with https (parameter issue?) and doesn't
> decompress pages that were requested with compression enabled.  Hence
> all assertions fail.  These issues would seem to make this new sampler
> not worth using unless you must take advantage of the line speed
> property.  Are there any advantages of this sampler that I don't know
> about?  Why was it developed?
>
>
>
>


Re: Testing the effect of page compression with Jmeter; Any gotchas?

2005-12-09 Thread sebb
The file

extras/ConvertHTTPSampler.txt

gives instructions on how to convert the samplers.

S.
On 08/12/05, Peter Lin <[EMAIL PROTECTED]> wrote:
> it is the one named HTTPClient. that is correct.
>
> peter
>
>
> On 12/8/05, Christensen, Alan <[EMAIL PROTECTED]> wrote:
> >
> > I'm not sure I know what the Http Sampler using Apache HttpClient is.
> > Is this the http sampler that is labeled "HTTP Request HTTPClient"?  I
> > have a large script that is using the other one (HTTP request).  Is
> > there any way to quickly change the type of sampler in my script by
> > doing some sort of a "replace all" using a word editor with my script?
> >
> > -Original Message-
> > From: sebb [mailto:[EMAIL PROTECTED]
> > Sent: Thursday, December 08, 2005 10:30 AM
> > To: JMeter Users List
> > Subject: Re: Testing the effect of page compression with Jmeter; Any
> > gotchas?
> >
> > "cps" should perhaps be named "bps", because it calculates the delay
> > based on the number of Java bytes sent or received.
> >
> > These are 8 bits, but that is without the overhead, which can vary
> > between connections - e.g. number of stop-bits, parity etc.
> >
> > Note that the setting only applies to the Http Sampler using Apache
> > HttpClient.
> >
> > It has no effect on the original Http Sampler which uses the JVM Http
> > implementation.
> >
> > Whatever setting is chosen will be approximate, because it does not take
> > the actual line speed into account.
> >
> > S.
> > On 08/12/05, Peter Lin <[EMAIL PROTECTED]> wrote:
> > > hmm... I haven't used the new feature myself. maybe sebb can respond
> > > with some tips
> > >
> > > peter
> > >
> > >
> > > On 12/8/05, Christensen, Alan <[EMAIL PROTECTED]> wrote:
> > > >
> > > > I tried setting these properties first to 6250 and later both to
> > 100.
> > > > Neither setting appeared to have any significant impact. (And, yes,
> > > > I did get rid of the "#" in column 1)  :-)
> > > >
> > > > I am using the December 7th nightly build to try out this
> > capability.
> > > >
> > > > I tried first via VPN, then directly to the site. No impact of
> > > > setting these parameters in either configuration.
> > > >
> > > > Any ideas?
> > > >
> > > > -Original Message-
> > > > From: Christensen, Alan
> > > > Sent: Thursday, December 08, 2005 8:45 AM
> > > > To: JMeter Users List
> > > > Subject: RE: Testing the effect of page compression with Jmeter; Any
> >
> > > > gotchas?
> > > >
> > > > The only lines that I could find in the Dec 7th nightly build
> > > > regarding this property are in the Jmeter properties file:
> > > >
> > > > # Define characters per second > 0 to emulate slow connections
> > > > #httpclient.socket.http.cps=0 #httpclient.socket.https.cps=0
> > > >
> > > > I just want to confirm that I should be using 8 bits/character. My
> > > > suspicion is also that a 50kbps line doesn't really deliver 6250
> > > > char/sec but some smaller number due to overhead.  Is this the case?
> >
> > > > If so, should I use a smaller number for
> > > > "httpclient.socket.http.cps" than 6250?  If anyone has thoughts on
> > > > the most appropriate number to use to simulate a 50kbps dialup line,
> > then I'd appreciate their advice.
> > > >
> > > > -Original Message-
> > > > From: Peter Lin [mailto:[EMAIL PROTECTED]
> > > > Sent: Thursday, December 08, 2005 8:19 AM
> > > > To: JMeter Users List
> > > > Subject: Re: Testing the effect of page compression with Jmeter; Any
> >
> > > > gotchas?
> > > >
> > > > sebb would know how to do that. I believe he updated the docs in
> > > > SVN, but the website hasn't been updated yet. if you download a
> > > > nightly, the docs packaged in the tar/zip should have an
> > explanation.
> > > >
> > > > peter
> > > >
> > > >
> > > > On 12/8/05, Christensen, Alan <[EMAIL PROTECTED]> wrote:
> > > > >
> > > > > How exactly is this property set for various speeds? If I wanted
> > > > > to emulate a 50kbps dialup line, what would I set this property
> > > > > to?  Do I
> > > >
> > > > > set it to 5/8 = 6250?
> > > > >
> > > > > -Original Message-
> > > > > From: sebb [mailto:[EMAIL PROTECTED]
> > > > > Sent: Monday, November 28, 2005 9:02 AM
> > > > > To: JMeter Users List
> > > > > Subject: Re: Testing the effect of page compression with Jmeter;
> > > > > Any gotchas?
> > > > >
> > > > > As to slower connections, the latest nightly builds include a
> > > > > means of
> > > >
> > > > > simulating slow connections for the Apache HttpClient sampler.
> > > > >
> > > > > See jmeter.properties:
> > > > >
> > > > > #httpclient.socket.http.cps=0
> > > > > #httpclient.socket.https.cps=0
> > > > >
> > > > > ==
> > > > >
> > > > > I looked at extending this to the default Http implementation, but
> >
> > > > > it is non-trivial, and requires overriding the boot classpath, as
> > > > > one has
> > > >
> > > > > to create a java.net class.
> > > > >
> > > > > S.
> > > > > On 28/11/05, Christensen, Alan <[EMAIL PROTECTED]> wrote:
> > > > > >
> > > > > > H

Re: Constant Throughput Timer performance

2005-12-09 Thread sebb
I suspect part of the problem is that all the threads start at once,
and having 100 thread groups with only 1 thread in each will make it
tedious to fix - you'll need to add a gradually increasing delay to
each of the thread groups.

What happens if you have fewer thread groups and more threads in each group?
You can set the ramp-up for each thread-group to ensure that the
threads start more evenly.

S.
On 09/12/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> Hi,
>
> I've configured a test with 100 thread groups (one thread per thread
> group) and added a constant throughput timer to get a 10 requests per
> second performance. To do so, I configured target throughput to 600
> (samples per minute) and selected to compute performance based on all
> active threads.
>
> The result is as expected, I get an average throughput of 10 requests
> per second, but they are not uniform along the time. What I get is
> something like this:
>
> At second 0, jmeter launches 100 requests to the server. At second 4,
> jmeter has received all the responses, but because it has launched 100
> requests at second 0, it must wait till second 10 to start another bunch
> of 100 requests. What I expect from this kind of tests is getting 10
> requests per second *each second*.
>
> This kind of behaviour is much more like a repeated peak test than a
> constant throughput test. I know I can get a more uniform test by dropping
> the thread count so jmeter would have to wait less time to launch the
> next bunch of requests, but that is weird and still a trick that does
> not solve the problem at all. Am I missing something? Is there
> a way to get a more uniform behaviour for this kind of tests?
>
> Thanks in advance for your help!
> --
> Abel Iago Toral Quiroga
> Igalia http://www.igalia.com
>
> -
> To unsubscribe, e-mail: [EMAIL PROTECTED]
> For additional commands, e-mail: [EMAIL PROTECTED]
>
>

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



Re: How to test sample succes to run or cancel next sample ?

2005-12-09 Thread sebb
Put the bsh jar in the lib directory, and remove from the classpath.

The classpath seems to be processed a bit too late for BeanShell.

S.

On 09/12/05, Nicolas <[EMAIL PROTECTED]> wrote:
> Hi,
> I'd like to try another way to solve my problem using beanshell sampler
> as I can manipulate IsSuccess variable with beanshell.
> But as I downloaded the latest jar for beanshell and added it to the
> classpath in jmeter GUI, I can't create a script. It tells me that
> the beanshell interpreter was not found. How do I set jmeter to use beanshell ?
>
> Thanks,
> Nicolas.
>
> Nicolas wrote:
>
> > Hi,
> >
> > I built a test case to test load on an intranet website of my company
> > using JMeter.
> > The goal is to test load of the HTML GUI of the site, not database
> > load or anything else.
> >
> > I first explain below what I already tested and the problems I
> > encountered.
> > Finally the question is on the end of this e-mail, I hope you'll get
> > there...
> >
> > So I first test the user login to the site and then test the call of a
> > page showing a list of products.
> >
> > On this site, users have to be logged in to see the products listing
> > page.
> > So I'd like to run the http sample that call the listing page only if
> > the http sample that call the login page and tries to log in is
> > successful.
> > A successful login is tested using a regular expression to look for a
> > session id in the HTML resulting page.
> >
> > I tried to add a Result Status Action Handler post-processor in my
> > login http sample, but it stops the tread definitely on failure.
> > What I want is just a loop to be stopped and the thread have to
> > continue in the next loop because login can fail due to heavy load or
> > any other problem and well work on next loop. It's a shame to stop all
> > the thread for that.
> >
> > Another way I tried was to create an IF logic controller, but I don't
> > know which variable value to test in this IF.
> > As I already had a Regular Expression Extractor to get a variable from
> > the session ID returned by the login result page, I tried to use it
> > for my test.
> > I added a "null" value in the default value field of the Regular
> > Expression Extractor to have a value to test.
> > In my IF logic controller, I test if my variable is different to "null".
> >
> > But this works because I have something to retrieve as I need the
> > session ID in other samples.
> >
> > My problem is that I also want to run other samples on a basis of
> > preceding sample success, and this samples don't necessary need to
> > retrieve any informations as the login sample do. I don't want to
> > create extra test such as Regular Expression Extractor if I don't need
> > to retrieve any informations. It would duplicate the assertions I
> > already added to the samples and complicate the test case.
> >
> > So I'd like to test if the preceding sample completed successfully
> > before running any new sample without using Regular Expression Extractor.
> >
> > One of my ideas is to use a JMeter variable, if such a variable exist.
> > So is there any JMeter variable, or anything else that would look like
> > ${isPrecedingSampleSuccessfull} for example to test if preceding
> > sample in the same thread succeed ?
> >
> > Did I miss something important in the JMeter usage ?
> >
> > Thanks,
> > Nicolas.
> >
> > -
> > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > For additional commands, e-mail: [EMAIL PROTECTED]
> >
>
> -
> To unsubscribe, e-mail: [EMAIL PROTECTED]
> For additional commands, e-mail: [EMAIL PROTECTED]
>
>

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



Re: testing a WebSphere Portal

2005-12-09 Thread sebb
Are these fixed URLs? Or do they vary?

If fixed, then record a script.

If they vary, they'll need to be extracted from the previous response.
Have a look at the various Pre- and Post- Processors.

S.
On 09/12/05, Peter Lin <[EMAIL PROTECTED]> wrote:
> people have used jmeter to test struts, so it should be feasible. I've never
> done it myself, so can't really give you any pointers.
>
> peter
>
>
> On 12/9/05, Sorin N. Ciolofan <[EMAIL PROTECTED]> wrote:
> >
> > Hello!
> >
> > Sorry that I disturb you again. Can somebody answer my question:
> > Can I test a WebSphere portal with jmeter?
> > The portal url's are in the following format:
> >
> > http://172.16.1.124:9081/wps/myportal/!ut/p/kcxml/04_Sj9SPykssy0xPLMnMz0vM0Y_QjzKLN4i3dAXJgFku-pGoIsam6CKOcAFfj_zcVP2gonx9b_0A_YLc0NCIckdFAJmrntE!/delta/base64xml/L3dJdyEvd0ZNQUFzQUMvNElVRS82XzBfSkk
> > !
> >
> > Thanks
> >
> >
> > -
> > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > For additional commands, e-mail: [EMAIL PROTECTED]
> >
> >
>
>

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



Re: Peak tests

2005-12-09 Thread sebb
On 09/12/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> El vie, 09-12-2005 a las 15:17, Peter Lin escribió:
> > I'm not sure I understand why you have 100 thread groups.
> >
> > you can put the requests in sequence in 1 threadGroup and increase the
> > thread count to 100 with 0 second ramp up.
> > peter
>
> Because the requests must be different. If I do what you say,
> all the 100 threads within the threadgroup will send the same
> request (the first one in the sequence).

Not necessarily. You can use variables in the requests, and read the
variables from a file using CSV Data Set. Each thread will get a
different line from the file (unless it wraps round).

> I tried using an interleave controller to avoid such problem, but the
> interleave controller just deals requests for each thread, so the result
> is the same.

> Anyway, I've also tried having one thread group and 100 threads within
> it sending the same HTTP request, but I still have the performance
> problem I commented in my previous email.
>
> Iago.
>
>
> >
> > On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > >
> > > hi!,
> > >
> > > I'm using Jmeter to perform a peak test of my web server (100 http
> > > requests at the same time). To do such, I've created 100 thread groups,
> > > each one with one thread that sends a different http request. At the web
> > > server I log the time (in milliseconds) at which each request is
> > > received.
> > >
> > > I need these requests to be sent to the web server as close as posible
> > > but I noticed they are logged at the web server in a period of time
> > > that varies but is never less than 0.8 secs.
> > >
> > > Shouldn't jmeter be able to send 100 requests in a lesser period of
> > > time? Is there any way to boost the launching of these requests?
> > >
> > > I've also noticed that, if I enable the option to parse HTML in each
> > > HTTP request (HTTPSampler.image_parser in jmx file), my web server log
> > > tells me that jmeter needs 2 or even more seconds to send all the 100
> > > requests, which leads me to think that some threads start processing its
> > > response before all requests have been sent ¿can I change this
> > > behaviour? This is a big problem, because this way, Jmeter is limited in
> > > its capacity to send the requests as soon as posible to stress the
> > > server.
> > >
> > > My test machine has the following features:
> > > CPU: 2.4 GHz
> > > RAM: 512 MB
> > > OS:  Debian Linux. Kernel 2.6.12.
> > >
> > > Thanks in advance for your help.
> > > --
> > > Abel Iago Toral Quiroga
> > > Igalia http://www.igalia.com
> > >
> > > -
> > > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > > For additional commands, e-mail: [EMAIL PROTECTED]
> > >
> > >
>
> -
> To unsubscribe, e-mail: [EMAIL PROTECTED]
> For additional commands, e-mail: [EMAIL PROTECTED]
>
>

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



Re: Peak tests

2005-12-09 Thread Iago Toral Quiroga
I understand what you tell me. I think the problem is a set of things:

1.- The server has to queue all the requests. Although it can have
enough threads to handle all of them, only one thread can be in
execution at a given time.
2.- Jmeter has the same problem when launching all the requests.
3.- The server needs CPU time to process incoming requests. This means
it may be processing a request and having another not logged, but queued.
4.- The same applies to Jmeter, it can have some threads processing
responses while others didn't send their requests yet.

maybe all this stuff makes it almost impossible getting better results.

Thanks again peter!

El vie, 09-12-2005 a las 16:59, Peter Lin escribió:
> your explanation helps, but here's the thing. Say I want to simulate the /.
> effect. If 5K people all hit /. at the same exact nanosecond, all the
> connections will still be queued up by the server and the webserver will
> process them one at a time. As soon as a server thread/process starts to
> process the request, it's going to slow down the processing for all
> subsequent requests.
> 
> therefore, it's really hard to do unless the server has lots of CPU's like
> 24 and multiple ethernet cards. On PC hardware, it's going to be very hard,
> if not impossible. On a mainframe, it will be easier to simulate a large
> number of truly concurrent requests.  If you want to reduce the likelihood
> of JMeter being an issue, then I would setup 4 clients to hit a single
> server. Though I really doubt you'll see a significant difference. Having done
> these types of tests a few hundred times, it's just hard to do.  beyond
> that, the bandwidth will severely limit the number of concurrent requests
> the server can handle.  only way to avoid the network bottleneck is to pay
> big bucks and co-locate at a backbone provider like MCI, Level3, Quest,
> Global Crossing, or ATT.
> 
> hope that helps.
> 
> peter
> 
> 
> On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> >
> > First of all, thanks a lot for your answer peter,
> > I comment it between lines:
> >
> > El vie, 09-12-2005 a las 16:00, Peter Lin escribió:
> > > for what it's worth, it's nearly impossible to get all 100 requests
> > within
> > > 500ms. The reason for this is making the initial connection to your
> > > webserver will have a high initial cost.  How many iterations are you
> > using.
> >
> > Just one per threadgroup because I want just 100 requests as close as
> > possible in time. Anyway, I understand what you say about the difficulty
> > of having all 100 request in 500ms. One thing I think has a lot to do
> > with this, besides the connection issue you talk about, is the fact that
> > some threads begin processing their responses before all the threads
> > send their requests, because this impedes other threads to enter into
> > CPU and send their requests, but I guess this is not a Jmeter issue, but
> > a kernel or a JVM matter.
> >
> > > if you look at all formal performance test specifications, they all have
> > a
> > > ramp up time. The actual measurement is taken for a period after the
> > server
> > > has reached a steady state. does that make sense?
> >
> > > what you need to do is set the iterations to something like 1000. start
> > the
> > > test and then start counting from like 10minutes after the test started
> > to
> > > get an accurate measurement.
> > >
> >
> > I get it, but this is not the scenario I want to measure. Besides the
> > scenario you talk about, we also need to know the maximum number of
> > requests the web server can handle if they come "at the same time". So,
> > imagine the web server has no requests to serve, and suddenly, N
> > requests come about at the same time, what we want to know is: how big N
> > can be? or what happens when N is like 50, 100, 300,... ?
> >
> > Notice that I need the server to be "idle" before all the requests come
> > about, because if it's steady serving responses it's not serving just
> > N requests, but N plus all the requests it was already serving.
> >
> > Thanks again for your help.
> > Iago.
> >
> > > On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > > >
> > > > El vie, 09-12-2005 a las 15:17, Peter Lin escribió:
> > > > > I'm not sure I understand why you have 100 thread groups.
> > > > >
> > > > > you can put the requests in sequence in 1 threadGroup and increase
> > the
> > > > > thread count to 100 with 0 second ramp up.
> > > > > peter
> > > >
> > > > Because the requests must be different. If I do what you say,
> > > > all the 100 threads within the threadgroup will send the same
> > > > request (the first one in the sequence).
> > > >
> > > > I tried using an interleave controller to avoid such problem, but the
> > > > interleave controller just deals requests for each thread, so the
> > result
> > > > is the same.
> > > >
> > > > Anyway, I've also tried having one thread group and 100 threads within
> > > > it sending the same HTTP request, but I still have t

Re: Constant Throughput Timer performance

2005-12-09 Thread Iago Toral Quiroga
Thanks for your comment sebb,

if I have more than one thread in each thread group my problem is
ensuring that each thread launches a different request, because each
thread will send the same sequence of requests under the threadgroup.
I've tried using an interleave controller, but it deals the requests for
each thread and not for all the threads in the threadgroup :(

Iago.

El vie, 09-12-2005 a las 18:01, sebb escribió:
> I suspect part of the problem is that all the threads start at once,
> and having 100 thread groups with only 1 thread in each will make it
> tedious to fix - you'll need to add a gradually increasing delay to
> each of the thread groups.
> What happens if you have fewer thread groups and more threads in each group?
> You can set the ramp-up for each thread-group to ensure that the
> threads start more evenly.
> 
> S.
> On 09/12/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > Hi,
> >
> > I've configured a test with 100 thread groups (one thread per thread
> > group) and added a constant throughput timer to get a 10 requests per
> > second performance. To do so, I configured target throughput to 600
> > (samples per minute) and selected to compute performance based on all
> > active threads.
> >
> > The result is as expected, I get an average throughput of 10 requests
> > per second, but they are not uniform along the time. What I get is
> > something like this:
> >
> > At second 0, jmeter launches 100 requests to the server. At second 4,
> > jmeter has received all the responses, but because it has launched 100
> > requests at second 0, it must wait till second 10 to start another bunch
> > of 100 requests. What I expect from this kind of tests is getting 10
> > requests per second *each second*.
> >
> > This kind of behaviour is much more like a repeated peak test than a
> > constant throughput test. I know I can get a more uniform test by dropping
> > the thread count so jmeter would have to wait less time to launch the
> > next bunch of requests, but that is weird and still a trick that does
> > not solve the problem at all. Am I missing something? Is there
> > a way to get a more uniform behaviour for this kind of tests?
> >
> > Thanks in advance for your help!
> > --
> > Abel Iago Toral Quiroga
> > Igalia http://www.igalia.com
> >
> > -
> > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > For additional commands, e-mail: [EMAIL PROTECTED]
> >
> >
> 
> -
> To unsubscribe, e-mail: [EMAIL PROTECTED]
> For additional commands, e-mail: [EMAIL PROTECTED]
-- 
Abel Iago Toral Quiroga 
Igalia http://www.igalia.com

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



Re: Value replacing in streams

2005-12-09 Thread sebb
I added the method

public Iterator getIterator()

to JMeterVariables a little while ago.

It's in the 2.1 branch and the nightly build.

Another way would be to specify the variables that need replacement,
rather than looking for all possible instances of a variable
reference. That might prove quicker.

S
On 09/12/05, Peter Lin <[EMAIL PROTECTED]> wrote:
> mike or sebb would know more about this area. my knowledge of the variable
> stuff is pretty weak
>
> peter
>
>
> On 12/9/05, Brudermann Roger <[EMAIL PROTECTED]> wrote:
> >
> > Thanks for all your feedback. I am currently working on my own stream
> > replacement facility.
> >
> > As far as I understand the JMeter implementation, all the preprocessor
> > variables are stored in an instance of JMeterVariables which can be accessed
> > by means of calling JMeterContextService.getContext().getVariables(). To
> > do the actual replacement in my stream I would like to know the names of all
> > the (counter) variables that have been configured. Unfortunately the class
> > JMeterVariables does not offer an operation to extract these names. It looks
> > like I am a bit stuck here.
> >
> > Do you know a solution or am I missing something?
> >
> > Roger
> >
> > -Ursprüngliche Nachricht-
> > Von: sebb [mailto:[EMAIL PROTECTED]
> > Gesendet: Montag, 5. Dezember 2005 13:51
> > An: JMeter Users List
> > Betreff: Re: Value replacing in streams
> >
> >
> > You will have to implement your own "stream edit" facility.
> >
> > S.
> > On 05/12/05, Brudermann Roger <[EMAIL PROTECTED]> wrote:
> > > Dear JMeter Community
> > >
> > > We have the need to do performance testing with with huge SOAP and JMS
> > > messages (e.g. 50MB). Instead of pasting these messages in a text box of
> > an
> > > appropriate sampler, we had the idea to write an own sampler which reads
> > the
> > > messages as data streams directly from the file system. So far so good.
> > >
> > > The problem is that we need to use pre processor variables (counters) to
> > > customize the messages. We noticed that counter value replacing does not
> > > take place and we think this is due to the fact that the messages are
> > not
> > > stored in a sampler property. Does somebody have an idea how we could
> > > implement some "on the fly value replacing" while reading the message
> > stream
> > > from the file system (e.g. line by line)?
> > >
> > > Any help is greatly appreciated! Thanks!
> > >
> > > Regards,
> > > Roger Brudermann
> > >
> > > -
> > > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > > For additional commands, e-mail: [EMAIL PROTECTED]
> > >
> > >
> >
> > -
> > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > For additional commands, e-mail: [EMAIL PROTECTED]
> >
> > -
> > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > For additional commands, e-mail: [EMAIL PROTECTED]
> >
> >
>
>

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



Re: Peak tests

2005-12-09 Thread Iago Toral Quiroga
El vie, 09-12-2005 a las 18:10, sebb escribió:
> On 09/12/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > El vie, 09-12-2005 a las 15:17, Peter Lin escribió:
> > > I'm not sure I understand why you have 100 thread groups.
> > >
> > > you can put the requests in sequence in 1 threadGroup and increase the
> > > thread count to 100 with 0 second ramp up.
> > > peter
> >
> > Because the requests must be different. If I do what you say,
> > all the 100 threads within the threadgroup will send the same
> > request (the first one in the sequence).
> 
> Not necessarily. You can use variables in the requests, and read the
> variables from a file using CSV Data Set. Each thread will get a
> different line from the file (unless it wraps round).

mmm... really interesting, that could save me a lot of problems. Where can
I get more information about it?

> > I tried using an interleave controller to avoid such problem, but the
> > interleave controller just deals requests for each thread, so the result
> > is the same.
> 
> > Anyway, I've also tried having one thread group and 100 threads within
> > it sending the same HTTP request, but I still have the performance
> > problem I commented in my previous email.
> >
> > Iago.
> >
> >
> > >
> > > On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > > >
> > > > hi!,
> > > >
> > > > I'm using Jmeter to perform a peak test of my web server (100 http
> > > > requests at the same time). To do such, I've created 100 thread groups,
> > > > each one with one thread that sends a different http request. At the web
> > > > server I log the time (in milliseconds) at which each request is
> > > > received.
> > > >
> > > > I need these requests to be sent to the web server as close as possible
> > > > but I noticed they are logged at the web server in a period of time
> > > > that varies but is never less than 0.8 secs.
> > > >
> > > > ¿Shouldn't jmeter be able to send 100 requests in a lesser period of
> > > > time? ¿Is there any way to boost the launching of these requests?
> > > >
> > > > I've also noticed that, if I enable the option to parse HTML in each
> > > > HTTP request (HTTPSampler.image_parser in jmx file), my web server log
> > > > tells me that jmeter needs 2 or even more seconds to send all the 100
> > > > requests, which leads me to think that some threads start processing its
> > > > response before all requests have been sent ¿can I change this
> > > > behaviour? This is a big problem, because this way, Jmeter is limited in
> > > > its capacity to send the requests as soon as posible to stress the
> > > > server.
> > > >
> > > > My test machine has the following features:
> > > > CPU: 2.4 GHz
> > > > RAM: 512 MB
> > > > OS:  Debian Linux. Kernel 2.6.12.
> > > >
> > > > Thanks in advance for your help.
> > > > --
> > > > Abel Iago Toral Quiroga
> > > > Igalia http://www.igalia.com
> > > >
> > > > -
> > > > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > > > For additional commands, e-mail: [EMAIL PROTECTED]
> > > >
> > > >
> >
> > -
> > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > For additional commands, e-mail: [EMAIL PROTECTED]
> >
> >
> 
> -
> To unsubscribe, e-mail: [EMAIL PROTECTED]
> For additional commands, e-mail: [EMAIL PROTECTED]
-- 
Abel Iago Toral Quiroga 
Igalia http://www.igalia.com

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



Re: When to use which http sampler?

2005-12-09 Thread sebb
I believe it also supports NTLM authentication even when running on
non-Windows hosts.

It's a lot more configurable. Adding slow connections to the default
implementation is hard work. The Apache implementation has proper
Cookie handling (though we aren't using that yet). It also has highly
configurable logging, should that be needed.

However, there are some features of the original HttpSampler that have
not yet been implemented in the new sampler. We're trying to add them
as problems arise, but day jobs and other commitments can get in the
way...

If there are features which don't yet work in the Apache Http Sampler,
please feel free to create a Bugzilla issue. Test cases are a help.
Patches even better...

S.
On 09/12/05, Peter Lin <[EMAIL PROTECTED]> wrote:
> the advantage of the HTTPClient version is it supports keep alive and slow
> connections correctly. the default sun implementation does not and hasn't
> since the beginning.
>
> peter
>
>
> On 12/9/05, Christensen, Alan <[EMAIL PROTECTED]> wrote:
> >
> > I originally wrote all my scripts using the "HTTP Request" sampler.  Now
> > there is a second one that is called "HTTP Request HTTPClient".  What is
> > the advantage of using one vs the other?
> >
> > So far the "HTTP Request" sampler has worked well for my tests except
> > that the line speed property cannot be used with this sampler.
> >
> > The "HTTP Request HTTPClient" sampler does work with the line speed
> > property but the current version in the December 7th nightly build
> > doesn't seem to work with https (parameter issue?) and doesn't
> > decompress pages that were requested with compression enabled.  Hence
> > all assertions fail.  These issues would seem to make this new sampler
> > not worth using unless you must take advantage of the line speed
> > property.  Are there any advantages of this sampler that I don't know
> > about?  Why was it developed?
> >
> >
> >
> >
>
>

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



Re: Constant Throughput Timer performance

2005-12-09 Thread sebb
On 09/12/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> Thanks for your comment sebb,
>
> if I have more than one thread in each thread group my problem is
> ensuring that each thread launches a different request, because each
> thread will send the same sequence of requests under the threadgroup.
> I've tried using an interleave controller, but it deals the requests for
> each thread and not for all the threads in the threadgroup :(

See my reply to the other thread.

Let's close this one now.

> Iago.
>
> El vie, 09-12-2005 a las 18:01, sebb escribió:
> > I suspect part of the problem is that all the threads start at once,
> > and having 100 thread groups with only 1 thread in each will make it
> > tedious to fix - you'll need to add a gradually increasing delay to
> > each of the thread groups.
> > What happens if you have fewer thread groups and more threads in each group?
> > You can set the ramp-up for each thread-group to ensure that the
> > threads start more evenly.
> >
> > S.
> > On 09/12/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > > Hi,
> > >
> > > I've configured a test with 100 thread groups (one thread per thread
> > > group) and added a constant throughput timer to get a 10 requests per
> > > second performance. To do so, I configured target throughput to 600
> > > (samples per minute) and selected to compute performance based on all
> > > active threads.
> > >
> > > The result is as expected, I get an average throughput of 10 requests
> > > per second, but they are not uniform along the time. What I get is
> > > something like this:
> > >
> > > At second 0, jmeter launches 100 requests to the server. At second 4,
> > > jmeter has received all the responses, but because it has lauched 100
> > > requests at second 0, it must wait till second 10 to start another bunch
> > > of 100 requests. What I expect from this kind of tests is getting 10
> > > requests per second *each second*.
> > >
> > > This kind of behaviour is much more like a repeated peak test than a
> > > constant troughput test. I know I can get a more uniform test by droping
> > > the thread count so jmeter would have to wait less time to launch the
> > > next bunch of requests, but that is weird and still a trick that does
> > > not solve the point of problem at all ¿I'm missing something?, ¿is there
> > > a way to get a more uniform behaviour for this kind of tests?
> > >
> > > Thanks in advance for your help!
> > > --
> > > Abel Iago Toral Quiroga
> > > Igalia http://www.igalia.com
> > >
> > > -
> > > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > > For additional commands, e-mail: [EMAIL PROTECTED]
> > >
> > >
> >
> > -
> > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > For additional commands, e-mail: [EMAIL PROTECTED]
> --
> Abel Iago Toral Quiroga
> Igalia http://www.igalia.com
>
> -
> To unsubscribe, e-mail: [EMAIL PROTECTED]
> For additional commands, e-mail: [EMAIL PROTECTED]
>
>

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



Re: Peak tests

2005-12-09 Thread sebb
On 09/12/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> El vie, 09-12-2005 a las 18:10, sebb escribió:
> > On 09/12/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > > El vie, 09-12-2005 a las 15:17, Peter Lin escribió:
> > > > I'm not sure I understand why you have 100 thread groups.
> > > >
> > > > you can put the requests in sequence in 1 threadGroup and increase the
> > > > thread count to 100 with 0 second ramp up.
> > > > peter
> > >
> > > Because the requests must be different. If I do what you say,
> > > all the 100 threads within the threadgroup will send the same
> > > request (the first one in the sequence).
> >
> > Not necessarily. You can use variables in the requests, and read the
> > variables from a file using CSV Data Set. Each thread will get a
> > different line from the file (unless it wraps round).
>
> mmm... really interesting, that could save me a lot of problems. Where can
> I get more information about it?

From the User manual:

http://jakarta.apache.org/jmeter/usermanual/component_reference.html#CSV_Data_Set_Config

Also have a look at the Wiki, particularly the JMeter FAQ.

The following is an old example, using User Parameters and StringFromFile:

http://wiki.apache.org/jakarta-jmeter/JMeterFAQ#head-1680863678257fbcb85bd97351860eb0049f19ae

You can now use CSV Data Set instead (which was added since that
example was written).

If you solve a problem that's not documented, please consider updating
the Wiki ...

> > > I tried using an interleave controller to avoid such problem, but the
> > > interleave controller just deals requests for each thread, so the result
> > > is the same.
> >
> > > Anyway, I've also tried having one thread group and 100 threads within
> > > it sending the same HTTP request, but I still have the performance
> > > problem I commented in my previous email.
> > >
> > > Iago.
> > >
> > >
> > > >
> > > > On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > > > >
> > > > > hi!,
> > > > >
> > > > > I'm using Jmeter to perform a peak test of my web server (100 http
> > > > > requests at the same time). To do such, I've created 100 thread 
> > > > > groups,
> > > > > each one with one thread that sends a different http request. At the 
> > > > > web
> > > > > server I log the time (in milliseconds) at which each request is
> > > > > received.
> > > > >
> > > > > I need these requests to be sent to the web server as close as posible
> > > > > but I noticed they are are logged at the web server in a period of 
> > > > > time
> > > > > that varies but is never lesser than 0.8 secs.
> > > > >
> > > > > ¿Shouldn't jmeter be able to send 100 requests in a leesser period of
> > > > > time? ¿Is there any way to boost the launching of these requests?
> > > > >
> > > > > I've also noticed that, if I enable the option to parse HTML in each
> > > > > HTTP request (HTTPSampler.image_parser in jmx file), my web server log
> > > > > tells me that jmeter needs 2 or even more seconds to send all the 100
> > > > > requests, which leads me to think that some threads start processing 
> > > > > its
> > > > > response before all requests have been sent ¿can I change this
> > > > > behaviour? This is a big problem, because this way, Jmeter is limited 
> > > > > in
> > > > > its capacity to send the requests as soon as posible to stress the
> > > > > server.
> > > > >
> > > > > My test machine has the following features:
> > > > > CPU: 2.4 GHz
> > > > > RAM: 512 MB
> > > > > OS:  Debian Linux. Kernel 2.6.12.
> > > > >
> > > > > Thanks in advance for your help.
> > > > > --
> > > > > Abel Iago Toral Quiroga
> > > > > Igalia http://www.igalia.com
> > > > >
> > > > > -
> > > > > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > > > > For additional commands, e-mail: [EMAIL PROTECTED]
> > > > >
> > > > >
> > >
> > > -
> > > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > > For additional commands, e-mail: [EMAIL PROTECTED]
> > >
> > >
> >
> > -
> > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > For additional commands, e-mail: [EMAIL PROTECTED]
> --
> Abel Iago Toral Quiroga
> Igalia http://www.igalia.com
>
> -
> To unsubscribe, e-mail: [EMAIL PROTECTED]
> For additional commands, e-mail: [EMAIL PROTECTED]
>
>

-
To unsubscribe, e-mail: [EMAIL PROTECTED]
For additional commands, e-mail: [EMAIL PROTECTED]



Re: Constant Throughput Timer performance

2005-12-09 Thread Peter Lin
honestly, I don't understand why first request needs to be different for all
threads.  if the point is to measure an application's ability to handle a
sudden spike, it's better to pick a very heavy page and set 1 threadGroup
with 100 threads and hit it.

using different thread groups just means you have to ramp for a longer
period. I can't stress enough how hard it is to really get 100 concurrent
requests.  From my experience, what matters more is the system is able to
handle a sudden spike gracefully without bringing down the website and
return to normal operation once the spike has passed.  100 concurrent
requests for an average size webpage 10Kb, means that in an ideal situation
one would need a full 100mbit bandwidth. On a 10mbit bandwidth, it's never
going to reach that. It's physically impossible.

unless the hosting facility has a dedicated OC12, it won't be able to handle
100 concurrent.  for some perspective, 40 concurrent requests for 18hrs a
day translates to 10million pages views.  I know this from first hand
experience working at superpages.com.  98% of the sites out there don't get
any where near this kind of traffic.

peter


On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
>
> Thanks for your comment sebb,
>
> if I have more than one thread in each thread group my problem is
> ensuring that each thread launches a different request, because each
> thread will send the same sequence of requests under the threadgroup.
> I've tried using an interleave controller, but it deals the requests for
> each thread and not for all the threads in the threadgroup :(
>
> Iago.
>
> El vie, 09-12-2005 a las 18:01, sebb escribió:
> > I suspect part of the problem is that all the threads start at once,
> > and having 100 thread groups with only 1 thread in each will make it
> > tedious to fix - you'll need to add a gradually increasing delay to
> > each of the thread groups.
> > What happens if you have fewer thread groups and more threads in each
> group?
> > You can set the ramp-up for each thread-group to ensure that the
> > threads start more evenly.
> >
> > S.
> > On 09/12/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > > Hi,
> > >
> > > I've configured a test with 100 thread groups (one thread per thread
> > > group) and added a constant throughput timer to get a 10 requests per
> > > second performance. To do so, I configured target throughput to 600
> > > (samples per minute) and selected to compute performance based on all
> > > active threads.
> > >
> > > The result is as expected, I get an average throughput of 10 requests
> > > per second, but they are not uniform along the time. What I get is
> > > something like this:
> > >
> > > At second 0, jmeter launches 100 requests to the server. At second 4,
> > > jmeter has received all the responses, but because it has lauched 100
> > > requests at second 0, it must wait till second 10 to start another
> bunch
> > > of 100 requests. What I expect from this kind of tests is getting 10
> > > requests per second *each second*.
> > >
> > > This kind of behaviour is much more like a repeated peak test than a
> > > constant troughput test. I know I can get a more uniform test by
> droping
> > > the thread count so jmeter would have to wait less time to launch the
> > > next bunch of requests, but that is weird and still a trick that does
> > > not solve the point of problem at all ¿I'm missing something?, ¿is
> there
> > > a way to get a more uniform behaviour for this kind of tests?
> > >
> > > Thanks in advance for your help!
> > > --
> > > Abel Iago Toral Quiroga
> > > Igalia http://www.igalia.com
> > >
> > > -
> > > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > > For additional commands, e-mail: [EMAIL PROTECTED]
> > >
> > >
> >
> > -
> > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > For additional commands, e-mail: [EMAIL PROTECTED]
> --
> Abel Iago Toral Quiroga
> Igalia http://www.igalia.com
>
> -
> To unsubscribe, e-mail: [EMAIL PROTECTED]
> For additional commands, e-mail: [EMAIL PROTECTED]
>
>


Re: Constant Throughput Timer performance

2005-12-09 Thread Iago Toral Quiroga
El vie, 09-12-2005 a las 18:49, Peter Lin escribió:
> honestly, I don't understand why first request needs to be different for all
> threads.  if the point is to measure an application's ability to handle a
> sudden spike, it's better to pick a very heavy page and set 1 threadGroup
> with 100 threads and hit it.

Because the web server is serving a GIS application that has a cache
system. So I need all the requests to be different in order to avoid
cached responses.

> using different thread groups just means you have to ramp for a longer
> period. I can't stress enough how hard it is to really get 100 concurrent
> requests.  From my experience, what matters more is the system is able to
> handle a sudden spike gracefully without brinding down the website and
> return to normal operation once the spike has passed.  100 concurrent
> requests for an average size webpage 10Kb, means that in an ideal situation
> one would need a full 100mbit bandwidth. On a 10mbit bandwidth, it's never
> going to reach that. It's physically impossible.
> 
> unless the hosting facility has a dedicated OC12, it won't be able to handle
> 100 concurrent.  for some perspective, 40 concurrent requests for 18hrs a
> day translates to 10million pages views.  I know this from first hand
> experience working at superpages.com.  98% of the sites out there don't get
> any where near this kind of traffic.
> 

I'm not talking about being able to serve 100 requests in one second
to the clients. What I want to know is what happens at the server when
100 requests appear simultaneously. Surely I need a huge bandwidth to
give response to all those requests, but not to get the requests
themselves, which is the point. A http request is very short in size,
let's say 500 bytes, so you don't need a huge bandwidth in order to
receive them. 

So, if I can receive 100 simultaneous requests, what will happen to my
server? will it crash? will it refuse connections? will it be able to
continue working? at which performance? etc... that is what I want to
know.

> On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> >
> > Thanks for your comment sebb,
> >
> > if I have more than one thread in each thread group my problem is
> > ensuring that each thread launches a different request, because each
> > thread will send the same sequence of requests under the threadgroup.
> > I've tried using an interleave controller, but it deals the requests for
> > each thread and not for all the threads in the threadgroup :(
> >
> > Iago.
> >
> > El vie, 09-12-2005 a las 18:01, sebb escribió:
> > > I suspect part of the problem is that all the threads start at once,
> > > and having 100 thread groups with only 1 thread in each will make it
> > > tedious to fix - you'll need to add a gradually increasing delay to
> > > each of the thread groups.
> > > What happens if you have fewer thread groups and more threads in each
> > group?
> > > You can set the ramp-up for each thread-group to ensure that the
> > > threads start more evenly.
> > >
> > > S.
> > > On 09/12/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > > > Hi,
> > > >
> > > > I've configured a test with 100 thread groups (one thread per thread
> > > > group) and added a constant throughput timer to get a 10 requests per
> > > > second performance. To do so, I configured target throughput to 600
> > > > (samples per minute) and selected to compute performance based on all
> > > > active threads.
> > > >
> > > > The result is as expected, I get an average throughput of 10 requests
> > > > per second, but they are not uniform along the time. What I get is
> > > > something like this:
> > > >
> > > > At second 0, jmeter launches 100 requests to the server. At second 4,
> > > > jmeter has received all the responses, but because it has lauched 100
> > > > requests at second 0, it must wait till second 10 to start another
> > bunch
> > > > of 100 requests. What I expect from this kind of tests is getting 10
> > > > requests per second *each second*.
> > > >
> > > > This kind of behaviour is much more like a repeated peak test than a
> > > > constant troughput test. I know I can get a more uniform test by
> > droping
> > > > the thread count so jmeter would have to wait less time to launch the
> > > > next bunch of requests, but that is weird and still a trick that does
> > > > not solve the point of problem at all ¿I'm missing something?, ¿is
> > there
> > > > a way to get a more uniform behaviour for this kind of tests?
> > > >
> > > > Thanks in advance for your help!
> > > > --
> > > > Abel Iago Toral Quiroga
> > > > Igalia http://www.igalia.com
> > > >
> > > > -
> > > > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > > > For additional commands, e-mail: [EMAIL PROTECTED]
> > > >
> > > >
> > >
> > > -
> > > To unsubscribe, e-mail: [EMAIL PROTECTED]
> > > For additional commands, 

Re: Constant Throughput Timer performance

2005-12-09 Thread Peter Lin
thanks for explaining. that makes sense now. given the application is
caching, having different requests would be crucial for valid measurement.
chances are, you'll need to use at least 4 clients and split the test plan
into 4 smaller test plans.  this way, it increases the chances that the
threads will have a shorter delay between each thread.

in the past, when I've had to test applications with cache, we made it so
the cache can be turned off.  This way, we can test the impact of concurrent
queries, versus the webserver's ability to handle 100 concurrent requests.
If your application doesn't have the capability, it's really going to be
hard to effectively test the impact of traffic spike.

peter


On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
>
> El vie, 09-12-2005 a las 18:49, Peter Lin escribió:
> > honestly, I don't understand why first request needs to be different for
> all
> > threads.  if the point is to measure an application's ability to handle
> a
> > sudden spike, it's better to pick a very heavy page and set 1
> threadGroup
> > with 100 threads and hit it.
>
> Because the web server is serving a GIS application that has a cache
> system. So I need all the requests to be different in order to avoid
> cached responses.
>
> > using different thread groups just means you have to ramp for a longer
> > period. I can't stress enough how hard it is to really get 100
> concurrent
> > requests.  From my experience, what matters more is the system is able
> to
> > handle a sudden spike gracefully without brinding down the website and
> > return to normal operation once the spike has passed.  100 concurrent
> > requests for an average size webpage 10Kb, means that in an ideal
> situation
> > one would need a full 100mbit bandwidth. On a 10mbit bandwidth, it's
> never
> > going to reach that. It's physically impossible.
> >
> > unless the hosting facility has a dedicated OC12, it won't be able to
> handle
> > 100 concurrent.  for some perspective, 40 concurrent requests for 18hrs
> a
> > day translates to 10million pages views.  I know this from first hand
> > experience working at superpages.com.  98% of the sites out there don't
> get
> > any where near this kind of traffic.
> >
>
> I'm not talking about of being able to serve 100 requests in one second
> to the clients. What I want to know is what happens at the server when
> 100 requests appear simultaneouly. Surely I need a huge bandwidth to
> give response to all those requests, but not to get the requests
> themselves, which is the point. A http request is very short in size,
> let's say 500 bytes, so you don't need a huge bandwidth in order to
> receive them.
>
> So, if I can receive 100 simultaneous requests, what will happen to my
> server? will it crash? will it refuse connections? will it be able to
> continue working? at which performce? etc... that is what I want to
> know.
>
> > On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > >
> > > Thanks for your comment sebb,
> > >
> > > if I have more than one thread in each thread group my problem is
> > > ensuring that each thread launches a different request, because each
> > > thread will send the same sequence of requests under the threadgroup.
> > > I've tried using an interleave controller, but it deals the requests
> for
> > > each thread and not for all the threads in the threadgroup :(
> > >
> > > Iago.
> > >
> > > El vie, 09-12-2005 a las 18:01, sebb escribió:
> > > > I suspect part of the problem is that all the threads start at once,
> > > > and having 100 thread groups with only 1 thread in each will make it
> > > > tedious to fix - you'll need to add a gradually increasing delay to
> > > > each of the thread groups.
> > > > What happens if you have fewer thread groups and more threads in
> each
> > > group?
> > > > You can set the ramp-up for each thread-group to ensure that the
> > > > threads start more evenly.
> > > >
> > > > S.
> > > > On 09/12/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > > > > Hi,
> > > > >
> > > > > I've configured a test with 100 thread groups (one thread per
> thread
> > > > > group) and added a constant throughput timer to get a 10 requests
> per
> > > > > second performance. To do so, I configured target throughput to
> 600
> > > > > (samples per minute) and selected to compute performance based on
> all
> > > > > active threads.
> > > > >
> > > > > The result is as expected, I get an average throughput of 10
> requests
> > > > > per second, but they are not uniform along the time. What I get is
> > > > > something like this:
> > > > >
> > > > > At second 0, jmeter launches 100 requests to the server. At second
> 4,
> > > > > jmeter has received all the responses, but because it has lauched
> 100
> > > > > requests at second 0, it must wait till second 10 to start another
> > > bunch
> > > > > of 100 requests. What I expect from this kind of tests is getting
> 10
> > > > > requests per second *each second*.
> > > > >
> > > > >

Re: Constant Throughput Timer performance

2005-12-09 Thread sebb
On 09/12/05, Peter Lin <[EMAIL PROTECTED]> wrote:
> thanks for explaining. that makes sense now. given the application is
> caching, having different requests would be crucial for valid measurement.
> chances are, you'll need to use atleast 4 clients and split the test plan
> into 4 smaller test plans.  this way, it increases the chances that the
> threads will have a shorter delay between each thread.
>
> in the past, when I've had to test applications with cache, we made it so
> the cache can be turned off.  This way, we can test the impact of concurrent
> queries, versus the webserver's ability to handle 100 concurrent requests.
> If your application doesn't have the capability, it's really going to be
> hard to effectively test the impact of traffic spike.

Unless you can add some variability to the URL to ensure that the
cache does not contain the request.

> peter
>
>
> On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> >
> > El vie, 09-12-2005 a las 18:49, Peter Lin escribió:
> > > honestly, I don't understand why first request needs to be different for
> > all
> > > threads.  if the point is to measure an application's ability to handle
> > a
> > > sudden spike, it's better to pick a very heavy page and set 1
> > threadGroup
> > > with 100 threads and hit it.
> >
> > Because the web server is serving a GIS application that has a cache
> > system. So I need all the requests to be different in order to avoid
> > cached responses.
> >
> > > using different thread groups just means you have to ramp for a longer
> > > period. I can't stress enough how hard it is to really get 100
> > concurrent
> > > requests.  From my experience, what matters more is the system is able
> > to
> > > handle a sudden spike gracefully without brinding down the website and
> > > return to normal operation once the spike has passed.  100 concurrent
> > > requests for an average size webpage 10Kb, means that in an ideal
> > situation
> > > one would need a full 100mbit bandwidth. On a 10mbit bandwidth, it's
> > never
> > > going to reach that. It's physically impossible.
> > >
> > > unless the hosting facility has a dedicated OC12, it won't be able to
> > handle
> > > 100 concurrent.  for some perspective, 40 concurrent requests for 18hrs
> > a
> > > day translates to 10 million page views.  I know this from first hand
> > > experience working at superpages.com.  98% of the sites out there don't
> > get
> > > anywhere near this kind of traffic.
> > >
> >
> > I'm not talking about of being able to serve 100 requests in one second
> > to the clients. What I want to know is what happens at the server when
> > 100 requests appear simultaneously. Surely I need a huge bandwidth to
> > give response to all those requests, but not to get the requests
> > themselves, which is the point. A http request is very short in size,
> > let's say 500 bytes, so you don't need a huge bandwidth in order to
> > receive them.
> >
> > So, if I can receive 100 simultaneous requests, what will happen to my
> > server? will it crash? will it refuse connections? will it be able to
> > continue working? at what performance? etc... that is what I want to
> > know.
> >
> > > On 12/9/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > > >
> > > > Thanks for your comment sebb,
> > > >
> > > > if I have more than one thread in each thread group my problem is
> > > > ensuring that each thread launches a different request, because each
> > > > thread will send the same sequence of requests under the threadgroup.
> > > > I've tried using an interleave controller, but it distributes the requests
> > for
> > > > each thread and not for all the threads in the threadgroup :(
> > > >
> > > > Iago.
> > > >
> > > > El vie, 09-12-2005 a las 18:01, sebb escribió:
> > > > > I suspect part of the problem is that all the threads start at once,
> > > > > and having 100 thread groups with only 1 thread in each will make it
> > > > > tedious to fix - you'll need to add a gradually increasing delay to
> > > > > each of the thread groups.
> > > > > What happens if you have fewer thread groups and more threads in
> > each
> > > > group?
> > > > > You can set the ramp-up for each thread-group to ensure that the
> > > > > threads start more evenly.
> > > > >
> > > > > S.
> > > > > On 09/12/05, Iago Toral Quiroga <[EMAIL PROTECTED]> wrote:
> > > > > > Hi,
> > > > > >
> > > > > > I've configured a test with 100 thread groups (one thread per
> > thread
> > > > > > group) and added a constant throughput timer to get a 10 requests
> > per
> > > > > > second performance. To do so, I configured target throughput to
> > 600
> > > > > > (samples per minute) and selected to compute performance based on
> > all
> > > > > > active threads.
> > > > > >
> > > > > > The result is as expected, I get an average throughput of 10
> > requests
> > > > > > per second, but they are not uniform along the time. What I get is
> > > > > > something like this:
> > > > > >
> > > > > > At second 0, jmeter