I'll try to post some code in this box; I don't know how the formatting will 
come through.
I decided to do everything in CoffeeScript, because JavaScript is pretty 
ugly.

Using the Express framework, here is the "login" handler:

exports.login = (req, res) ->
  async.auto({
    token: async.apply(userService.generateToken, 32, null)
    user: async.apply(userService.getWithNamePassword, req.query.name, req.query.password)
    update: ['token', 'user', (callback, results) ->
      log.debug(results.user)
      results.user.token = results.token
      req.user = results.user
      callback(null, null)
    ]
    save: ['update', (callback) ->
      # saveDbUserToken already receives the callback, so don't call it a second time here
      userService.saveDbUserToken(req.user, callback)
    ]
    assemble: ['save', (callback) ->
      userService.getAssembled(req.user, callback)
    ]
    send: ['assemble', (callback, results) ->
      res.send(results.assemble)
      callback(null, null)
    ]
  })
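
One thing I would probably add while debugging (sketch only, with made-up step 
names, not my exact flow): async.auto takes an optional final callback, and 
without one an error from generateToken or getWithNamePassword never reaches 
the response. The shape is:

# Minimal sketch of async.auto with a final callback so errors surface.
# 'slowStep' and 'failingStep' are placeholder names, not real services.
async = require('async')

demo = (req, res) ->
  async.auto({
    slowStep: (callback) ->
      setTimeout((-> callback(null, 'ok')), 10)
    failingStep: ['slowStep', (callback, results) ->
      callback('something broke', null)
    ]
  }, (err, results) ->
    # err is whatever the first failing step passed to its callback
    if err
      res.statusCode = 500
      res.send({ error: String(err) })
    else
      res.send(results)
  )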

//-------------------------

Here are some relevant portions of the "userService". I added the "now" 
date stuff to profile the queries.
The queries are taking < 0.001 seconds, so DB time is negligible.

exports.generateToken = (len, charSet, callback) ->
  charSet = charSet || 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
  randomString = ''
  # exclusive range: [0..len] would produce len + 1 characters
  for num in [0...len]
    randomPoz = Math.floor(Math.random() * charSet.length)
    randomString += charSet.substring(randomPoz, randomPoz + 1)

  now = new Date()
  token = now.getTime() + '|' + randomString
  callback(null, token)
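
Unrelated to throughput, but if that character loop ever shows up in a 
profile, Node's built-in crypto module can build the whole random part in one 
call. A rough equivalent (the function name is mine, and it keeps the same 
time-prefix layout):

crypto = require('crypto')

# Sketch: random token via crypto.randomBytes instead of a Math.random loop.
exports.generateTokenCrypto = (len, callback) ->
  crypto.randomBytes Math.ceil(len / 2), (err, buf) ->
    return callback(err, null) if err
    # hex encoding doubles the byte count, so trim back down to len characters
    callback(null, Date.now() + '|' + buf.toString('hex').slice(0, len))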

exports.getWithName = (name, callback) ->
  now = new Date()
  time = now.getSeconds() + now.getMilliseconds() * 0.001
  before = time
  uq_count++
  count = uq_count

  console.log("U+(" + count + ") " + time)
  m.db.collection('users').findOne(
    {name: name},
    (err, data) ->
      now = new Date()
      time = now.getSeconds() + now.getMilliseconds() * 0.001
      console.log("U-(" + count + ") " + time + " = " + (time - before))
      callback(err, data)
  )
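
One caveat on those timings: getSeconds() + getMilliseconds() wraps every 
minute, so a delta that crosses a minute boundary comes out negative. 
Date.now() is milliseconds since the epoch and never wraps; a variant of the 
same measurement (helper name is mine):

# Sketch: the same query profiling, but using Date.now() for the elapsed time.
profiledFindOne = (name, callback) ->
  before = Date.now()
  m.db.collection('users').findOne(
    {name: name},
    (err, data) ->
      console.log("findOne(" + name + ") took " + (Date.now() - before) + " ms")
      callback(err, data)
  )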
exports.getWithNamePassword = (name, password, callback) ->
  exports.getWithName(
    name,
    (err, user) ->
      # user can be null when no match is found, so guard before reading user.password
      if (err || !user || user.password != password)
        callback("validation failed", null)
      else
        callback(null, user)
  )



exports.getAssembled = (user, callback) ->
  async.auto({
    shop: async.apply(exports.getShop)
    constants: async.apply(exports.getConstants)
    journal: (callback) ->
      actionService.journalFor(user, callback)
    assemble: ['shop', 'constants', 'journal', (callback, results) ->
      final = { account: user.data.account, garden: user.data.garden, friends: user.data.friends }
      final.shop = results.shop
      final.constants = results.constants
      final.journal = results.journal
      callback(null, final)
    ]
  }, (err, results) ->
    if (!err)
      callback(err, results.assemble)
    else
      callback(err, null)
  )
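
Since I suspected JSON serialization earlier, one cheap sanity check is to 
time JSON.stringify on a real assembled payload and compare it to the 
per-request time (helper name is mine):

# Sketch: how long does serializing one assembled response actually take?
timeStringify = (assembled) ->
  t0 = Date.now()
  body = JSON.stringify(assembled)
  console.log("stringify: " + body.length + " bytes in " + (Date.now() - t0) + " ms")
  body

If that number is tiny, serialization isn't the bottleneck and the event loop 
is the next suspect.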



On Tuesday, April 10, 2012 1:51:37 PM UTC-4, Marco Rogers wrote:
>
> Yeah I think you need to provide some code. It should actually be pretty 
> difficult for you to get throughput that slow from node :) I suspect that 
> you're not doing your async callbacks correctly and blocking the event loop 
> more than you should be.
>
> :Marco
>
> On Tuesday, April 10, 2012 10:15:34 AM UTC-7, timp wrote:
>>
>> Greetings,
>>
>> I was wondering if someone who has experience with working with scaling 
>> problems could give me some insight on an issue.
>>
>> Long story:
>>
>> A)
>>
>> So- I'm making an iPhone/iPad app, which basically is a spiffy view of 
>> json data provided by a server.  The app makes modifications to the json, 
>> the server stores them in mongodb, etc.  Model view in the form of a game.
>>
>> I set one of my goals as to have the server run as cheaply as possible, 
>> so that even if the app was not popular, I could keep it up with minimal 
>> cost/overhead.
>>
>>
>> B)
>>
>> At first I wrote the server in symfony2.  But I'm wary of PHP, 
>> especially when I read the documentation for the "apns" (apple push 
>> notification) plugin, which basically says, "well, I wouldn't use what I 
>> wrote if you have a lot of users, but you can if you want."
>>
>> C)
>>
>> So then I said, let's do "grails."  Promises to be fast.  I like 
>> java/groovy whatever.  I like that I can refactor java without wondering 
>> what I'm breaking.  But then the throughput was *very* bad.   Let's say, 20 
>> connections per second, in production war mode, to do trivial (but not 
>> hello world, json blahblah blah) things.
>>
>> I've done some work on games.  One second on my modern computer is 
>> like a whole day on a computer 10 years ago.  It is unacceptable to have 
>> only 10-20 pages a second, even if I've messed up the configuration. 
>>
>> D)
>>
>> So then, this programmer next to me was talking about nodeJS.  About how 
>> great it is.  So this last friday, I ported the server (which is all of 500 
>> lines or so) from grails to nodeJS.  Using all the callback stuff, 
>> async.auto, etc.  But then I'm only getting 60 connections per second. 
>>  Apparently JSON is actually really slow or something.  I did some 
>> optimization (one of which is just clipping data, which is unacceptable), 
>> and profiled and got it to ~120 a second.  (I'm using the most intensive 
>> function as a baseline).
>>
>> E)
>> So then, I thought: "this is ridiculous."  (And I became compulsive. 
>>  Which sucks.)  And on Saturday and Sunday, I wrote a non-blocking web 
>> server in c++, modules and everything.  (I did rip and clean up code from a 
>> previous personal project.)  And I'm getting ~2000 completions a second. 
>>
>>
>> So you're reading this and saying, "duh.  Custom compiled c++ will of 
>> course be faster than javascript running in a generic framework.  And 
>> you've probably messed up configuration or something somewhere."
>>
>> I know this.  I feel like I just lost the weekend to some stupid 
>> compulsion. But at the same time, I am truly annoyed at how slow these web 
>> servers/frameworks are.
>>
>>
>> ------------------------
>> ------------------------
>> ------------------------
>>
>>
>> So my question is this:
>>
>> How do real scaling companies deal with this problem?  
>> Non-cacheable, non-static page serving.
>>
>> Does scaling become a business decision?  
>>
>> Are things really slow, just because they can be?  Why do people think 
>> "nodeJS fast" ?
>>
>> Or do real scaling companies have C/C++ services which accept 
>> connections from a front end in nodeJS?  (Because when nodeJS doesn't 
>> actually do anything, it really is only 1/6 slower than a pure C/C++ 
>> solution, which, I guess, is pretty fast.)
>>
>> How close is v8 to a theoretical maximum?  
>> (theoretical maximum would be, I guess, compiled code is near gcc -O3, 
>> and compilation is linear with a small coefficient.)
>>
>> Why aren't web server pages run through LLVM?  It should be possible to 
>> create a fibered (which is really what continuations come down to, it seems 
>> to me at this moment), LLVM-based, JITted, any-language (whichever languages 
>> LLVM supports) solution for serving a web page.
>>
>> ----
>>
>> So, if anyone has any thoughts, let me know.  If I've said anything which 
>> may be rude, I am not trying to be.
>> In previous experience, I was never concerned with speed, because I was 
>> never directly paying for it before.
>>
>> I guess I'll run the server on nodeJS, and just load balance it if I ever 
>> need to.
>> Although, I would very much rather have LLVM'd pages plugged into a 
>> C/C++ server.
>>
>> ----
>>
>> Thoughts?  Is this all academic: if things are popular, you will make 
>> money, and scaling will become a business issue?
>>
>>
>> -tim
>>
>>
